From 83b6170ae1a289f309cce8d70e8f6f59cb672aeb Mon Sep 17 00:00:00 2001 From: "James H. Nguyen" Date: Mon, 14 Apr 2025 12:32:22 -0700 Subject: [PATCH 1/2] ci: Update coverage script to use 'tests' directory and simplify execution --- scripts/run_coverage_ci.sh | 310 ++++++++++--------------------------- 1 file changed, 78 insertions(+), 232 deletions(-) diff --git a/scripts/run_coverage_ci.sh b/scripts/run_coverage_ci.sh index 6a78930..8e0a2f9 100755 --- a/scripts/run_coverage_ci.sh +++ b/scripts/run_coverage_ci.sh @@ -22,37 +22,17 @@ if [ -n "$GITHUB_WORKSPACE" ]; then ls -la fi -# Determine test directory -TEST_DIR=${TEST_DIR_ENV:-"test_dir"} +# Define the test directory +TEST_DIR="tests" echo "Using test directory: $TEST_DIR" -# Try different locations if test directory not found +# Check if the test directory exists if [ ! -d "$TEST_DIR" ]; then - echo "Warning: Test directory $TEST_DIR not found in current directory" - - # Try parent directory - if [ -d "../$TEST_DIR" ]; then - TEST_DIR="../$TEST_DIR" - echo "Found test directory in parent directory: $TEST_DIR" - # Try in GitHub workspace - elif [ -n "$GITHUB_WORKSPACE" ] && [ -d "$GITHUB_WORKSPACE/$TEST_DIR" ]; then - TEST_DIR="$GITHUB_WORKSPACE/$TEST_DIR" - echo "Found test directory in GITHUB_WORKSPACE: $TEST_DIR" - # Use find to locate test directory - else - TEST_DIR_FOUND=$(find . -type d -name "test_dir" | head -1) - if [ -n "$TEST_DIR_FOUND" ]; then - TEST_DIR="$TEST_DIR_FOUND" - echo "Found test directory using find: $TEST_DIR" - else - echo "Error: Could not find test directory" - echo "Current directory: $(pwd)" - echo "Available directories:" - ls -la - # Continue anyway to avoid failing the CI - # We'll handle individual files not found later - fi - fi + echo "Error: Test directory '$TEST_DIR' not found!" 
+ echo "Current directory: $(pwd)" + echo "Available directories:" + ls -la + exit 1 # Fail the script if the main test directory is missing fi # Set timeout duration (in seconds) from environment variable or use default @@ -93,222 +73,88 @@ handle_test_error() { find . -name "__pycache__" -type d -exec rm -rf {} + 2>/dev/null || true find . -name "*.pyc" -delete -# Run tests in smaller batches with timeouts +# Run tests with coverage enabled +# We can run all tests together now that conflicts are resolved echo "Running test suite with coverage enabled..." -# Define the basic tools tests paths -TOOLS_TESTS=( - "$TEST_DIR/test_file_tools.py" - "$TEST_DIR/test_system_tools.py" - "$TEST_DIR/test_directory_tools.py" - "$TEST_DIR/improved/test_quality_tools.py" - "$TEST_DIR/improved/test_summarizer_tool.py" - "$TEST_DIR/improved/test_tree_tool.py" - "tests/tools/test_base_tool.py" -) - -# Check if tools test files exist -TOOLS_TESTS_EXISTING=() -for TEST_FILE in "${TOOLS_TESTS[@]}"; do - if [ -f "$TEST_FILE" ]; then - TOOLS_TESTS_EXISTING+=("$TEST_FILE") - else - echo "Warning: Test file $TEST_FILE not found" | tee -a "$SUMMARY_LOG" - fi -done - -# First, run the basic tools tests which are known to work -if [ ${#TOOLS_TESTS_EXISTING[@]} -gt 0 ]; then - echo "Running tools tests (known to work well)..." 
| tee -a "$SUMMARY_LOG" - python -m pytest \ - --cov=src.cli_code \ - --cov-report=xml:coverage.xml \ - --cov-report=html:coverage_html \ - --cov-report=term \ - --timeout=$CI_TIMEOUT \ - "${TOOLS_TESTS_EXISTING[@]}" -else - echo "No tools tests found to run" | tee -a "$SUMMARY_LOG" - # Initialize coverage file to avoid errors - python -m pytest \ - --cov=src.cli_code \ - --cov-report=xml:coverage.xml \ - --cov-report=html:coverage_html \ - --cov-report=term -fi - -# Define model tests paths -MODEL_TESTS=( - "$TEST_DIR/test_models_base.py" - "$TEST_DIR/test_model_basic.py" - "$TEST_DIR/test_model_integration.py" -) - -# Check if model test files exist -MODEL_TESTS_EXISTING=() -for TEST_FILE in "${MODEL_TESTS[@]}"; do - if [ -f "$TEST_FILE" ]; then - MODEL_TESTS_EXISTING+=("$TEST_FILE") - else - echo "Warning: Test file $TEST_FILE not found" | tee -a "$SUMMARY_LOG" - fi -done - -# Now run the model tests separately -if [ ${#MODEL_TESTS_EXISTING[@]} -gt 0 ]; then - echo "Running model tests..." | tee -a "$SUMMARY_LOG" - python -m pytest \ - --cov=src.cli_code \ - --cov-append \ - --cov-report=xml:coverage.xml \ - --cov-report=html:coverage_html \ - --cov-report=term \ - --timeout=$CI_TIMEOUT \ - "${MODEL_TESTS_EXISTING[@]}" -else - echo "No model tests found to run" | tee -a "$SUMMARY_LOG" -fi - -# Track failures -FAILED_TESTS=0 -TIMED_OUT_TESTS=0 - -# Function to run tests with a common pattern -run_test_group() { - GROUP_NAME=$1 - shift - TEST_FILES=("$@") - - echo "Running $GROUP_NAME tests..." | tee -a "$SUMMARY_LOG" - - for test_file in "${TEST_FILES[@]}"; do - # Check if file exists - if [ ! -f "$test_file" ]; then - echo "Warning: Test file $test_file not found, skipping" | tee -a "$SUMMARY_LOG" - continue - fi - - echo "Running $test_file with timeout $CI_TIMEOUT seconds..." 
| tee -a "$SUMMARY_LOG" - LOG_FILE="$LOG_DIR/$(basename $test_file).log" - - # Run test with timeout and capture output - python -m pytest \ - --cov=src.cli_code \ - --cov-append \ - --timeout=$CI_TIMEOUT \ - "$test_file" > "$LOG_FILE" 2>&1 - - EXIT_CODE=$? - if [ $EXIT_CODE -ne 0 ]; then - if [ $EXIT_CODE -eq 124 ]; then - TIMED_OUT_TESTS=$((TIMED_OUT_TESTS + 1)) - else - FAILED_TESTS=$((FAILED_TESTS + 1)) - fi - handle_test_error "$test_file" "$EXIT_CODE" "$LOG_FILE" - else - echo "✅ $test_file completed successfully" | tee -a "$SUMMARY_LOG" - fi - done -} - -# Run gemini model tests individually -run_test_group "gemini model" \ - "$TEST_DIR/test_gemini_model.py" \ - "$TEST_DIR/test_gemini_model_advanced.py" \ - "$TEST_DIR/test_gemini_model_coverage.py" \ - "$TEST_DIR/test_gemini_model_error_handling.py" - -# Run ollama model tests individually -run_test_group "ollama model" \ - "$TEST_DIR/test_ollama_model.py" \ - "$TEST_DIR/test_ollama_model_advanced.py" \ - "$TEST_DIR/test_ollama_model_coverage.py" \ - "$TEST_DIR/test_ollama_model_context.py" \ - "$TEST_DIR/test_ollama_model_error_handling.py" - -# Run config tests individually -run_test_group "config" \ - "$TEST_DIR/test_config.py" \ - "$TEST_DIR/test_config_comprehensive.py" \ - "$TEST_DIR/test_config_edge_cases.py" \ - "$TEST_DIR/test_config_missing_methods.py" - -# Run main tests individually -run_test_group "main" \ - "$TEST_DIR/test_main.py" \ - "$TEST_DIR/test_main_comprehensive.py" \ - "$TEST_DIR/test_main_edge_cases.py" \ - "$TEST_DIR/test_main_improved.py" - -# Run remaining tests individually -run_test_group "remaining" \ - "$TEST_DIR/test_task_complete_tool.py" \ - "$TEST_DIR/test_tools_base.py" \ - "$TEST_DIR/test_tools_init_coverage.py" \ - "$TEST_DIR/test_utils.py" \ - "$TEST_DIR/test_utils_comprehensive.py" \ - "$TEST_DIR/test_test_runner_tool.py" \ - "$TEST_DIR/test_basic_functions.py" \ - "$TEST_DIR/test_tools_basic.py" \ - "$TEST_DIR/test_tree_tool_edge_cases.py" - -# Generate a final 
coverage report -echo "Generating final coverage report..." | tee -a "$SUMMARY_LOG" python -m pytest \ - --cov=src.cli_code \ - --cov-append \ + --cov=src/cli_code \ --cov-report=xml:coverage.xml \ --cov-report=html:coverage_html \ - --cov-report=term + --cov-report=term \ + --timeout=$CI_TIMEOUT \ + "$TEST_DIR" # Run all tests within the tests directory -echo "Coverage report generated in coverage.xml and coverage_html/" | tee -a "$SUMMARY_LOG" +EXIT_CODE=$? -# Print summary of test results -echo "" | tee -a "$SUMMARY_LOG" -echo "Test Summary:" | tee -a "$SUMMARY_LOG" -echo "-------------" | tee -a "$SUMMARY_LOG" -echo "Failed tests: $FAILED_TESTS" | tee -a "$SUMMARY_LOG" -echo "Timed out tests: $TIMED_OUT_TESTS" | tee -a "$SUMMARY_LOG" -echo "Log files available in: $LOG_DIR" | tee -a "$SUMMARY_LOG" -echo "Summary log: $SUMMARY_LOG" | tee -a "$SUMMARY_LOG" - -# Extract overall coverage percentage for GitHub output -if [ -f "coverage.xml" ]; then - echo "✅ coverage.xml file exists" | tee -a "$SUMMARY_LOG" - - # Extract overall coverage percentage - COVERAGE=$(python -c "import xml.etree.ElementTree as ET; tree = ET.parse('coverage.xml'); root = tree.getroot(); line_rate = float(root.attrib['line-rate'])*100; print('{:.2f}%'.format(line_rate))") - echo "Overall coverage percentage: $COVERAGE" | tee -a "$SUMMARY_LOG" - - # Set output for GitHub Actions - if [ -n "$GITHUB_OUTPUT" ]; then - echo "percentage=$COVERAGE" >> $GITHUB_OUTPUT +if [ $EXIT_CODE -ne 0 ]; then + echo "----------------------------------------" | tee -a "$SUMMARY_LOG" + if [ $EXIT_CODE -eq 124 ]; then + echo "⚠️ WARNING: Pytest run TIMED OUT (after $CI_TIMEOUT seconds)" | tee -a "$SUMMARY_LOG" + TIMED_OUT_TESTS=1 + FAILED_TESTS=0 # Treat timeout as a special case, not necessarily failed tests else - echo "Note: GITHUB_OUTPUT not defined, skipping GitHub output" | tee -a "$SUMMARY_LOG" + echo "⚠️ WARNING: Pytest run FAILED with exit code $EXIT_CODE" | tee -a "$SUMMARY_LOG" + FAILED_TESTS=1 + 
TIMED_OUT_TESTS=0 fi + echo "Check logs in $LOG_DIR for details." | tee -a "$SUMMARY_LOG" + echo "----------------------------------------" | tee -a "$SUMMARY_LOG" else - echo "❌ coverage.xml file not generated!" | tee -a "$SUMMARY_LOG" - if [ -n "$GITHUB_OUTPUT" ]; then - echo "percentage=0.00%" >> $GITHUB_OUTPUT - fi + echo "✅ Pytest run completed successfully" | tee -a "$SUMMARY_LOG" + FAILED_TESTS=0 + TIMED_OUT_TESTS=0 fi -echo "Coverage generation for CI completed." | tee -a "$SUMMARY_LOG" - -# Use the CI_EXIT_ON_TEST_FAILURE value set at the beginning of the script (=1) -# to determine whether to exit with an error code on test failures - -if [ $FAILED_TESTS -gt 0 -o $TIMED_OUT_TESTS -gt 0 ]; then - echo "Test run had $FAILED_TESTS failing tests and $TIMED_OUT_TESTS timed out tests" | tee -a "$SUMMARY_LOG" - - if [ -n "$CI" ] && [ "$CI_EXIT_ON_TEST_FAILURE" = "1" ]; then - echo "Exiting with error code due to test failures" | tee -a "$SUMMARY_LOG" - exit 1 - else - echo "Warning: Tests failed but continuing (CI_EXIT_ON_TEST_FAILURE=$CI_EXIT_ON_TEST_FAILURE)" | tee -a "$SUMMARY_LOG" - fi +# Final Summary +echo "=======================================" | tee -a "$SUMMARY_LOG" +echo "Test Run Summary:" | tee -a "$SUMMARY_LOG" +echo "- Failed Tests: $FAILED_TESTS" | tee -a "$SUMMARY_LOG" +echo "- Timed Out Tests: $TIMED_OUT_TESTS" | tee -a "$SUMMARY_LOG" +echo "Test run finished at $(date)" | tee -a "$SUMMARY_LOG" +echo "=======================================" | tee -a "$SUMMARY_LOG" + +# Exit with appropriate code +if [ $FAILED_TESTS -gt 0 ] && [ "$CI_EXIT_ON_TEST_FAILURE" = "1" ]; then + echo "Exiting with failure code due to test failures." + exit 1 +elif [ $TIMED_OUT_TESTS -gt 0 ]; then + echo "Exiting with failure code due to test timeouts." + exit 1 # Or a different code if desired for timeouts fi -# If we made it here, exit successfully +echo "Coverage generation script completed." 
exit 0 + +# Old logic for running tests individually (Removed) +# # Function to run tests with a common pattern +# run_test_group() { +# ... +# } +# +# # Run gemini model tests individually +# run_test_group "gemini model" \ +# "tests/models/test_gemini.py" \ +# ... +# +# # Run ollama model tests individually +# run_test_group "ollama model" \ +# "tests/models/test_ollama.py" \ +# ... +# +# # Run config tests individually +# run_test_group "config" \ +# "tests/test_config.py" # Assuming config tests are at root of tests? +# ... +# +# # Run main tests individually +# run_test_group "main" \ +# "tests/test_main.py" \ +# ... +# +# # Run remaining tests individually +# run_test_group "remaining" \ +# "tests/tools/test_task_complete_tool.py" \ +# "tests/tools/test_base_tool.py" \ +# "tests/test_utils.py" # Assuming utils test is at root of tests? +# ... From bc8814fc8f0f954da2e708a0815f979ae3464b25 Mon Sep 17 00:00:00 2001 From: "James H. Nguyen" Date: Mon, 14 Apr 2025 12:47:03 -0700 Subject: [PATCH 2/2] refactor: Remove remaining references to test_dir --- .github/workflows/python-ci.yml | 14 ++---- README.md | 6 +-- docs/CODE_COVERAGE.md | 4 +- docs/TESTING.md | 6 +-- docs/architecture.md | 2 +- docs/contributing.md | 8 ++-- scripts/find_hanging_tests.sh | 2 +- scripts/memory_backup.json | 2 +- scripts/run_api_tests.sh | 6 +-- scripts/run_specific_coverage.sh | 2 +- scripts/run_targeted_coverage.sh | 61 ++++++++++------------- scripts/run_tests_with_coverage.py | 2 +- scripts/test_coverage_local.sh | 77 ++++++++++++++---------------- scripts/test_runner_coverage.sh | 6 +-- scripts/tools_coverage.sh | 65 +++++++++++-------------- sonar-project.properties | 2 +- 16 files changed, 121 insertions(+), 144 deletions(-) diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml index e3dcdea..63617ca 100644 --- a/.github/workflows/python-ci.yml +++ b/.github/workflows/python-ci.yml @@ -50,20 +50,16 @@ jobs: echo "Current directory: $(pwd)" echo "Directory 
contents:" ls -la - echo "Test directory content:" - if [ -d "test_dir" ]; then - ls -la test_dir/ + echo "Test directory content (tests/):" + if [ -d "tests" ]; then + ls -la tests/ else - echo "test_dir not found in current directory!" - echo "Looking for test_dir in other locations:" - find . -type d -name "test_dir" + echo "tests directory not found!" fi - name: Lint with Ruff (check) run: | - # Run linting but don't fail on errors in test files - ruff check --fix --verbose --preview --exclude "test_dir/*" . - ruff check --fix --verbose --preview --exit-zero test_dir/ + ruff check --fix --verbose --preview . - name: Lint with Ruff (format) run: | diff --git a/README.md b/README.md index 3296c56..c3c0ec9 100644 --- a/README.md +++ b/README.md @@ -248,11 +248,11 @@ When running tests, use these approaches for better control and reliability: ```bash # Run specific test files -python -m pytest test_dir/test_ollama_model_context.py +python -m pytest tests/models/test_ollama_model_context.py # Run specific test classes or methods -python -m pytest test_dir/test_ollama_model_context.py::TestOllamaModelContext -python -m pytest test_dir/test_ollama_model_context.py::TestOllamaModelContext::test_clear_history +python -m pytest tests/models/test_ollama_model_context.py::TestOllamaModelContext +python -m pytest tests/models/test_ollama_model_context.py::TestOllamaModelContext::test_clear_history # Use pattern matching with -k to select specific tests python -m pytest -k "tree_tool or ollama_context" diff --git a/docs/CODE_COVERAGE.md b/docs/CODE_COVERAGE.md index bff365f..d3541e9 100644 --- a/docs/CODE_COVERAGE.md +++ b/docs/CODE_COVERAGE.md @@ -28,10 +28,10 @@ If you prefer to run coverage manually, use the following commands: ```bash # Run pytest with coverage -python -m pytest --cov=src/cli_code --cov-report=term --cov-report=xml --cov-report=html test_dir/ +python -m pytest --cov=src/cli_code --cov-report=term --cov-report=xml --cov-report=html tests/ # To run 
specific test files -python -m pytest --cov=src/cli_code --cov-report=term test_dir/test_file.py +python -m pytest --cov=src/cli_code --cov-report=term tests/tools/test_file_tools.py ``` ## Analyzing Coverage Results diff --git a/docs/TESTING.md b/docs/TESTING.md index 391fc51..3164b98 100644 --- a/docs/TESTING.md +++ b/docs/TESTING.md @@ -15,7 +15,7 @@ This document provides guidelines and best practices for writing and maintaining Tests are organized in two main directories: - `test_dir/`: Contains most test files -- `tests/`: Contains specialized test directories (e.g., `models/`, `tools/`) +- `tests/`: Contains all test files, organized by module (e.g., `tests/models`, `tests/tools`). Test file naming follows these conventions: @@ -42,10 +42,10 @@ python -m pytest --cov=src ```bash # Run tests in a specific file -python -m pytest test_dir/test_gemini_model.py +python -m pytest tests/models/test_gemini.py # Run a specific test -python -m pytest test_dir/test_gemini_model.py::test_generate_simple_text_response +python -m pytest tests/models/test_gemini.py::test_generate_simple_text_response ``` ## Mock Objects and API Interactions diff --git a/docs/architecture.md b/docs/architecture.md index 9794e5c..0a66c34 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -166,7 +166,7 @@ flowchart TD * **Sophisticated Planning**: Consider if needed beyond system prompts. * **Asynchronous Operations**: Evaluate for long-running tools. * **State Management**: Assess if needed for more complex multi-turn tasks. -* **Testing**: Expand test suite (`test_dir`) to cover both providers, mock API interactions, and test the agent selection logic. +* **Testing**: Expand test suite (`tests/`) to cover both providers, mock API interactions, and test the agent selection logic. * **Tool Schema Validation/Translation**: Ensure robust handling of schema differences between Gemini and OpenAI formats. * **Summarizer Tool Integration**: Clarify registration/usage. 
diff --git a/docs/contributing.md b/docs/contributing.md index 67cea7b..3d4ffef 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -9,7 +9,7 @@ Thank you for your interest in contributing to CLI-Code! This document outlines - **Run local SonarCloud scan**: Get a baseline of current code quality and coverage ```bash # Generate coverage report - pytest --cov=src test_dir --cov-report=xml + pytest --cov=src tests --cov-report=xml # Run local SonarCloud scan sonar-scanner -Dsonar.login=YOUR_SONARCLOUD_TOKEN @@ -32,7 +32,7 @@ Thank you for your interest in contributing to CLI-Code! This document outlines - Ensure overall code coverage does not decrease - Run the test suite frequently during development: ```bash - pytest --cov=src test_dir + pytest --cov=src tests ``` ### 5. Verification @@ -41,7 +41,7 @@ Thank you for your interest in contributing to CLI-Code! This document outlines - Run a final local SonarCloud scan to verify quality improvements: ```bash # Generate final coverage report - pytest --cov=src test_dir --cov-report=xml + pytest --cov=src tests --cov-report=xml # Run local SonarCloud scan sonar-scanner -Dsonar.login=YOUR_SONARCLOUD_TOKEN @@ -94,7 +94,7 @@ For the fastest feedback loop, run SonarCloud analysis locally before pushing ch 2. Generate coverage report: ```bash - pytest --cov=src test_dir --cov-report=xml + pytest --cov=src tests --cov-report=xml ``` 3. 
Run local scan (requires your SonarCloud token): diff --git a/scripts/find_hanging_tests.sh b/scripts/find_hanging_tests.sh index cc83497..da994ec 100755 --- a/scripts/find_hanging_tests.sh +++ b/scripts/find_hanging_tests.sh @@ -45,7 +45,7 @@ run_test_with_timeout() { } # Determine test directory -TEST_DIR="test_dir" +TEST_DIR="tests" # Check if TEST_DIR environment variable is set if [ -n "$TEST_DIR_ENV" ]; then diff --git a/scripts/memory_backup.json b/scripts/memory_backup.json index ee98472..df4cf4c 100644 --- a/scripts/memory_backup.json +++ b/scripts/memory_backup.json @@ -104,7 +104,7 @@ "name": "SonarCloud Analysis Process", "entityType": "TechnicalProcess", "observations": [ - "Generate coverage report with: pytest --cov=src test_dir --cov-report=xml", + "Generate coverage report with: pytest --cov=src tests --cov-report=xml", "Run local SonarCloud scan with: sonar-scanner -Dsonar.login=YOUR_SONARCLOUD_TOKEN or use environment variable", "Local analysis allows for faster feedback loop before pushing changes", "GitHub Actions workflow automatically runs scans on push", diff --git a/scripts/run_api_tests.sh b/scripts/run_api_tests.sh index 3647130..f1f2793 100755 --- a/scripts/run_api_tests.sh +++ b/scripts/run_api_tests.sh @@ -8,9 +8,9 @@ echo "Running API-dependent tests with a 10-second timeout per test..." # Run API-dependent tests python -m pytest \ - test_dir/test_ollama_model.py \ - test_dir/test_gemini_model.py \ - test_dir/test_model_integration.py \ + tests/models/test_ollama.py \ + tests/models/test_gemini.py \ + tests/models/test_model_integration.py \ -v \ --timeout=10 diff --git a/scripts/run_specific_coverage.sh b/scripts/run_specific_coverage.sh index a17e83b..a7b9f6a 100755 --- a/scripts/run_specific_coverage.sh +++ b/scripts/run_specific_coverage.sh @@ -19,7 +19,7 @@ echo "Running comprehensive coverage for $MODULE..." 
coverage erase # Find all test files that might test this module -TEST_FILES=$(find tests test_dir -name "test_*.py" -type f -exec grep -l "$MODULE" {} \;) +TEST_FILES=$(find tests -name "test_*.py" -type f -exec grep -l "$MODULE" {} \;) if [ -z "$TEST_FILES" ]; then echo "No test files found for $MODULE" diff --git a/scripts/run_targeted_coverage.sh b/scripts/run_targeted_coverage.sh index df973e6..4e83404 100755 --- a/scripts/run_targeted_coverage.sh +++ b/scripts/run_targeted_coverage.sh @@ -1,53 +1,44 @@ #!/bin/bash -# run_targeted_coverage.sh - Run coverage tests for specific modules +# Script to run targeted coverage reports for specific modules -# Set error handling set -e +echo "Running targeted coverage analysis..." -echo "Running coverage for key modules..." - -# Clear existing coverage data first to avoid mixed results +# Clear previous coverage data coverage erase -# Test runner tool - Run test files separately to avoid import conflicts -echo "==== Testing test_runner.py (test_dir tests) ====" -python -m pytest test_dir/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-report=term - +# --- Test Runner --- echo "==== Testing test_runner.py (tests/tools tests) ====" -python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-append --cov-report=term +python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-report=term # Add test_runner coverage to main report coverage combine -# Gemini model -echo "==== Testing gemini.py ====" -python -m pytest test_dir/test_gemini_model*.py -v --cov=src.cli_code.models.gemini --cov-report=term +# --- Models --- +echo "==== Testing Gemini models (tests/models tests) ====" +python -m pytest tests/models/test_gemini*.py -v --cov=src.cli_code.models.gemini --cov-report=term -# Ollama model -echo "==== Testing ollama.py ====" -python -m pytest test_dir/test_ollama_model*.py -v --cov=src.cli_code.models.ollama 
--cov-report=term +echo "==== Testing Ollama models (tests/models tests) ====" +python -m pytest tests/models/test_ollama*.py -v --cov=src.cli_code.models.ollama --cov-report=term -# File tools -echo "==== Testing file_tools.py ====" -python -m pytest test_dir/test_file_tools.py -v --cov=src.cli_code.tools.file_tools --cov-report=term +# --- Specific Tools --- +echo "==== Testing File Tools (tests/tools tests) ====" +python -m pytest tests/tools/test_file_tools.py -v --cov=src.cli_code.tools.file_tools --cov-report=term -# Tree tool -echo "==== Testing tree_tool.py ====" -python -m pytest test_dir/test_tree_tool.py -v --cov=src.cli_code.tools.tree_tool --cov-report=term -python -m pytest test_dir/test_tree_tool_edge_cases.py -v --cov=src.cli_code.tools.tree_tool --cov-append --cov-report=term +echo "==== Testing Tree Tool (tests/tools tests) ====" +python -m pytest tests/tools/test_tree_tool.py -v --cov=src.cli_code.tools.tree_tool --cov-report=term +python -m pytest tests/tools/test_tree_tool_edge_cases.py -v --cov=src.cli_code.tools.tree_tool --cov-append --cov-report=term -# Task complete tool -echo "==== Testing task_complete_tool.py ====" -python -m pytest test_dir/test_task_complete_tool.py -v --cov=src.cli_code.tools.task_complete_tool --cov-report=term -python -m pytest tests/tools/test_task_complete_tool.py -v --cov=src.cli_code.tools.task_complete_tool --cov-append --cov-report=term +echo "==== Testing Task Complete Tool (tests/tools tests) ====" +python -m pytest tests/tools/test_task_complete_tool.py -v --cov=src.cli_code.tools.task_complete_tool --cov-report=term -# Run tests for all remaining tools to get comprehensive coverage -echo "==== Testing other tools ====" -python -m pytest test_dir/test_tools_basic.py test_dir/test_tools_init_coverage.py test_dir/test_directory_tools.py test_dir/test_quality_tools.py test_dir/test_summarizer_tool.py -v --cov=src.cli_code.tools --cov-report=term +# --- Other Tools --- +echo "==== Testing Other Tools 
(tests/tools tests) ====" +# Note: test_tools_init_coverage might be in root tests folder +python -m pytest tests/tools/test_tools_basic.py tests/tools/test_directory_tools.py tests/tools/test_quality_tools.py tests/tools/test_summarizer_tool.py tests/test_tools_init_coverage.py -v --cov=src.cli_code.tools --cov-report=term -# Generate a complete coverage report at the end -coverage combine -coverage report -coverage html +echo "==== Targeted coverage tests complete ====" -echo "Targeted coverage complete!" \ No newline at end of file +# Optional: Generate combined report at the end (though individual reports printed above) +# coverage combine +# coverage report -m \ No newline at end of file diff --git a/scripts/run_tests_with_coverage.py b/scripts/run_tests_with_coverage.py index e415efb..7571b2a 100755 --- a/scripts/run_tests_with_coverage.py +++ b/scripts/run_tests_with_coverage.py @@ -60,7 +60,7 @@ def main(): cmd.append("-v") # Run tests - result = subprocess.run(cmd + ["test_dir/"], check=False) + result = subprocess.run(cmd + ["tests/"], check=False) if result.returncode != 0: print("\n⚠️ Some tests failed! See above for details.") diff --git a/scripts/test_coverage_local.sh b/scripts/test_coverage_local.sh index c934188..f3b5edf 100755 --- a/scripts/test_coverage_local.sh +++ b/scripts/test_coverage_local.sh @@ -10,15 +10,12 @@ echo "Starting local test coverage generation..." mkdir -p coverage_html # Determine test directory -TEST_DIR=${TEST_DIR_ENV:-"test_dir"} +TEST_DIR=${TEST_DIR_ENV:-"tests"} echo "Using test directory: $TEST_DIR" -# Verify test directory exists +# Check if the test directory exists if [ ! -d "$TEST_DIR" ]; then echo "Error: Test directory $TEST_DIR does not exist!" - echo "Current directory: $(pwd)" - echo "Available directories:" - ls -la exit 1 fi @@ -65,12 +62,12 @@ echo "Running test suite with coverage enabled..." 
# Define the basic tools tests paths TOOLS_TESTS=( - "$TEST_DIR/test_file_tools.py" - "$TEST_DIR/test_system_tools.py" - "$TEST_DIR/test_directory_tools.py" - "$TEST_DIR/improved/test_quality_tools.py" - "$TEST_DIR/improved/test_summarizer_tool.py" - "$TEST_DIR/improved/test_tree_tool.py" + "tests/tools/test_file_tools.py" + "tests/tools/test_system_tools.py" + "tests/tools/test_directory_tools.py" + "tests/tools/test_quality_tools.py" + "tests/tools/test_summarizer_tool.py" + "tests/tools/test_tree_tool.py" "tests/tools/test_base_tool.py" ) @@ -100,9 +97,9 @@ fi # Define model tests paths MODEL_TESTS=( - "$TEST_DIR/test_models_base.py" - "$TEST_DIR/test_model_basic.py" - "$TEST_DIR/test_model_integration.py" + "tests/models/test_base.py" + "tests/models/test_model_basic.py" + "tests/models/test_model_integration.py" ) # Check if model test files exist @@ -175,44 +172,44 @@ run_test_group() { # Run gemini model tests individually run_test_group "gemini model" \ - "$TEST_DIR/test_gemini_model.py" \ - "$TEST_DIR/test_gemini_model_advanced.py" \ - "$TEST_DIR/test_gemini_model_coverage.py" \ - "$TEST_DIR/test_gemini_model_error_handling.py" + "tests/models/test_gemini.py" \ + "tests/models/test_gemini_model_advanced.py" \ + "tests/models/test_gemini_model_coverage.py" \ + "tests/models/test_gemini_model_error_handling.py" # Run ollama model tests individually run_test_group "ollama model" \ - "$TEST_DIR/test_ollama_model.py" \ - "$TEST_DIR/test_ollama_model_advanced.py" \ - "$TEST_DIR/test_ollama_model_coverage.py" \ - "$TEST_DIR/test_ollama_model_context.py" \ - "$TEST_DIR/test_ollama_model_error_handling.py" + "tests/models/test_ollama.py" \ + "tests/models/test_ollama_model_advanced.py" \ + "tests/models/test_ollama_model_coverage.py" \ + "tests/models/test_ollama_model_context.py" \ + "tests/models/test_ollama_model_error_handling.py" # Run config tests individually run_test_group "config" \ - "$TEST_DIR/test_config.py" \ - "$TEST_DIR/test_config_comprehensive.py" 
\
-    "$TEST_DIR/test_config_edge_cases.py" \
-    "$TEST_DIR/test_config_missing_methods.py"
+    "tests/test_config.py" \
+    "tests/test_config_comprehensive.py" \
+    "tests/test_config_edge_cases.py" \
+    "tests/test_config_missing_methods.py"
 
 # Run main tests individually
 run_test_group "main" \
-    "$TEST_DIR/test_main.py" \
-    "$TEST_DIR/test_main_comprehensive.py" \
-    "$TEST_DIR/test_main_edge_cases.py" \
-    "$TEST_DIR/test_main_improved.py"
+    "tests/test_main.py" \
+    "tests/test_main_comprehensive.py" \
+    "tests/test_main_edge_cases.py" \
+    "tests/test_main_improved.py"
 
 # Run remaining tests individually
 run_test_group "remaining" \
-    "$TEST_DIR/test_task_complete_tool.py" \
-    "$TEST_DIR/test_tools_base.py" \
-    "$TEST_DIR/test_tools_init_coverage.py" \
-    "$TEST_DIR/test_utils.py" \
-    "$TEST_DIR/test_utils_comprehensive.py" \
-    "$TEST_DIR/test_test_runner_tool.py" \
-    "$TEST_DIR/test_basic_functions.py" \
-    "$TEST_DIR/test_tools_basic.py" \
-    "$TEST_DIR/test_tree_tool_edge_cases.py"
+    "tests/tools/test_task_complete_tool.py" \
+    "tests/tools/test_base_tool.py" \
+    "tests/test_tools_init_coverage.py" \
+    "tests/test_utils.py" \
+    "tests/test_utils_comprehensive.py" \
+    "tests/tools/test_test_runner_tool.py" \
+    "tests/test_basic_functions.py" \
+    "tests/tools/test_tools_basic.py" \
+    "tests/tools/test_tree_tool_edge_cases.py"
 
 # Generate a final coverage report
 echo "Generating final coverage report..." 
| tee -a "$SUMMARY_LOG" diff --git a/scripts/test_runner_coverage.sh b/scripts/test_runner_coverage.sh index a04df5f..74de3cb 100755 --- a/scripts/test_runner_coverage.sh +++ b/scripts/test_runner_coverage.sh @@ -11,10 +11,10 @@ coverage erase # Run all test_runner.py tests with coverage echo "=== Running tests/tools/test_test_runner_tool.py ===" -python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-report=term +python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-append --cov-report=term -echo "=== Running test_dir/test_test_runner_tool.py ===" -python -m pytest test_dir/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-append --cov-report=term +echo "=== Running tests/tools/test_test_runner_tool.py ===" +python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-append --cov-report=term # Combine coverage data and generate reports echo "=== Generating combined coverage report ===" diff --git a/scripts/tools_coverage.sh b/scripts/tools_coverage.sh index b8dd1aa..8a33905 100755 --- a/scripts/tools_coverage.sh +++ b/scripts/tools_coverage.sh @@ -1,52 +1,45 @@ #!/bin/bash -# tools_coverage.sh - Run comprehensive coverage for all tool modules +# Script to run coverage specifically for the tools module set -e +echo "Running coverage analysis for tools..." -echo "Running comprehensive tools coverage..." - -# Clean any existing coverage data +# Clear previous coverage data coverage erase -# Test runner tool -echo "=== Running test_runner.py tests ===" -python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner +# Run tests for each tool, appending coverage data +echo "Testing file_tools..." 
+python -m pytest tests/tools/test_file_tools.py -v --cov=src.cli_code.tools.file_tools --cov-append -# Task complete tool -echo "=== Running task_complete_tool.py tests ===" -python -m pytest tests/tools/test_task_complete_tool.py -v --cov=src.cli_code.tools.task_complete_tool --cov-append +echo "Testing directory_tools..." +python -m pytest tests/tools/test_directory_tools.py -v --cov=src.cli_code.tools.directory_tools --cov-append -# File tools -echo "=== Running file_tools.py tests ===" -python -m pytest test_dir/test_file_tools.py -v --cov=src.cli_code.tools.file_tools --cov-append +echo "Testing quality_tools..." +python -m pytest tests/tools/test_quality_tools.py -v --cov=src.cli_code.tools.quality_tools --cov-append -# Directory tools -echo "=== Running directory_tools.py tests ===" -python -m pytest test_dir/test_directory_tools.py -v --cov=src.cli_code.tools.directory_tools --cov-append +echo "Testing summarizer_tool..." +python -m pytest tests/tools/test_summarizer_tool.py -v --cov=src.cli_code.tools.summarizer_tool --cov-append -# Quality tools -echo "=== Running quality_tools.py tests ===" -python -m pytest test_dir/improved/test_quality_tools.py -v --cov=src.cli_code.tools.quality_tools --cov-append +echo "Testing tree_tool..." +python -m pytest tests/tools/test_tree_tool.py tests/tools/test_tree_tool_edge_cases.py -v --cov=src.cli_code.tools.tree_tool --cov-append -# Summarizer tool -echo "=== Running summarizer_tool.py tests ===" -python -m pytest test_dir/improved/test_summarizer_tool.py -v --cov=src.cli_code.tools.summarizer_tool --cov-append +echo "Testing system_tools..." +python -m pytest tests/tools/test_system_tools.py -v --cov=src.cli_code.tools.system_tools --cov-append -# Tree tool -echo "=== Running tree_tool.py tests ===" -python -m pytest test_dir/improved/test_tree_tool.py test_dir/test_tree_tool_edge_cases.py -v --cov=src.cli_code.tools.tree_tool --cov-append +echo "Testing base_tool and init..." 
+# Assuming test_tools_init_coverage is in root tests +python -m pytest tests/tools/test_base_tool.py tests/test_tools_init_coverage.py -v --cov=src.cli_code.tools.base --cov=src.cli_code.tools.__init__ --cov-append -# System tools -echo "=== Running system_tools.py tests ===" -python -m pytest test_dir/test_system_tools.py test_dir/test_tools_basic.py::TestSystemTools -v --cov=src.cli_code.tools.system_tools --cov-append +echo "Testing task_complete_tool..." +python -m pytest tests/tools/test_task_complete_tool.py -v --cov=src.cli_code.tools.task_complete_tool --cov-append -# Base tool class -echo "=== Running base.py tests ===" -python -m pytest test_dir/test_tools_init_coverage.py tests/tools/test_base_tool.py -v --cov=src.cli_code.tools.base --cov-append +echo "Testing test_runner_tool..." +python -m pytest tests/tools/test_test_runner_tool.py -v --cov=src.cli_code.tools.test_runner --cov-append -# Generate comprehensive report -echo "=== Generating comprehensive coverage report ===" -coverage report --include="src/cli_code/tools/*.py" -coverage html +# Generate final report for the tools module +echo "Generating final report for tools module..." +coverage combine +coverage report --include="src/cli_code/tools/*" +coverage html --include="src/cli_code/tools/*" -echo "Tools coverage complete. Check coverage_html/index.html for detailed report." \ No newline at end of file +echo "Tools coverage complete!" 
\ No newline at end of file diff --git a/sonar-project.properties b/sonar-project.properties index 43bb6e9..9f21ba4 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -17,7 +17,7 @@ sonar.python.coverage.reportPaths=coverage.xml # Configure test coverage exclusions # ollama.py is excluded as it primarily contains integration code with external dependencies # that is difficult to test without mocking the entire Ollama API -sonar.coverage.exclusions=test_dir/**/*,tests/**/*,src/cli_code/models/ollama.py +sonar.coverage.exclusions=tests/**/*,src/cli_code/models/ollama.py # Force SonarCloud to see all files as new code to get proper coverage metrics # Note: We're using this temporarily to establish accurate baseline coverage