diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..9ffd37b1 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,37 @@ +## Description + + + +## Related Issues + + + +## Type of Change + + + +- [ ] `[Fix]` - Bug fix (non-breaking change fixing an issue) +- [ ] `[Feature]` - New feature (non-breaking change adding functionality) +- [ ] `[Refactor]` - Code refactoring (no functional changes) +- [ ] `[Release]` - Release preparation +- [ ] `[Hotfix]` - Critical fix for production + +## How was this solved? + + + +## Checklist + +- [ ] PR title includes appropriate tag: `[Fix]`, `[Feature]`, `[Refactor]`, `[Release]`, or `[Hotfix]` +- [ ] Linked to related issue (if applicable) +- [ ] Code passes `make check` (lint, format, isort) +- [ ] Tests added/updated for changes (if applicable) +- [ ] Documentation updated (if applicable) + +## Testing + + + +## Additional Notes + + diff --git a/.github/auto-label-config.yml b/.github/auto-label-config.yml new file mode 100644 index 00000000..3ffc2ac0 --- /dev/null +++ b/.github/auto-label-config.yml @@ -0,0 +1,41 @@ +# ============================================================================= +# Auto-Label Configuration +# ============================================================================= +# +# Used by: auto-label-prs.yml +# +# OUTPUT: +# PRs get labels based on: +# - Title prefix: [Fix] → "Fix" label +# - Changed files: docs/** → "documentation" label +# - Branch name: dependabot/* → "dependencies" label +# +# ============================================================================= + +autolabeler: + - label: 'documentation' + files: + - 'docs/**/*' + - '*.md' + branch: + - '/docs?\/.+/' + - label: 'Fix' + title: + - '/^\[Fix\]/i' + - label: 'Feature' + title: + - '/^\[Feature\]/i' + - label: 'Refactor' + title: + - '/^\[Refactor\]/i' + - label: 'Release' + title: + - '/^\[Release\]/i' + - label: 'Hotfix' + title: + - '/^\[Hotfix\]/i' + - label: 'dependencies' + files: + - 'pyproject.toml' + branch: + - '/dependabot\/.+/' diff --git a/.github/draft-changelog-config.yml b/.github/draft-changelog-config.yml new file mode 100644 index 00000000..90a0fde0 --- /dev/null +++ b/.github/draft-changelog-config.yml @@ -0,0 +1,67 @@ +# ============================================================================= +# Draft Changelog Configuration +# ============================================================================= +# +# Used by: draft-changelog.yml +# +# OUTPUT: +# A draft release in GitHub Releases containing: +# - Title: v5.1.0 (based on version-resolver) +# - Body: Grouped list of PRs (based on categories) +# +# ============================================================================= + +name-template: 'v$RESOLVED_VERSION' +tag-template: 'v$RESOLVED_VERSION' + +template: | + ## What's Changed + + $CHANGES + + **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION + +categories: + - title: 'New Features' + labels: + - 'Feature' + - 'enhancement' + - title: 'Bug Fixes' + labels: + - 'Fix' + - 'bug' + - 'Hotfix' + - title: 'Refactoring' + labels: + - 'Refactor' + - 'refactoring' + - title: 'Documentation' + labels: + - 'documentation' + - 'docs' + - title: 'Maintenance' + labels: + - 'maintenance' + - 'dependencies' + - 'Release' + +version-resolver: + major: + labels: + - 'major' + - 'breaking' + minor: + labels: + - 'minor' + - 'Feature' + patch: + labels: + - 'patch' + - 'Fix' + - 'Hotfix' + default: patch 
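+
+# Worked example of the flow described above: a PR titled
+# "[Feature] add a new optimizer" receives the 'Feature' label via
+# auto-label-config.yml, is grouped under "New Features" in the draft
+# notes, and bumps the minor version (e.g. v5.0.x -> v5.1.0).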
+ +exclude-labels: + - 'skip-changelog' + - 'duplicate' + - 'invalid' diff --git a/.github/workflows/auto-label-prs.yml b/.github/workflows/auto-label-prs.yml new file mode 100644 index 00000000..280c8974 --- /dev/null +++ b/.github/workflows/auto-label-prs.yml @@ -0,0 +1,35 @@ +# ============================================================================= +# Auto-Label PRs +# ============================================================================= +# +# TRIGGER: +# When a PR is opened or updated +# +# OUTPUT: +# PR gets a label based on its title prefix: +# - [Fix] → "Fix" label +# - [Feature] → "Feature" label +# - [Refactor] → "Refactor" label +# - etc. +# +# ============================================================================= + +name: Auto-Label PRs + +on: + pull_request: + types: [opened, reopened, synchronize] + +permissions: + contents: read + pull-requests: write + +jobs: + add_labels: + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@v6 + with: + config-name: auto-label-config.yml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..09d0e4f7 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,90 @@ +name: Documentation + +on: + push: + branches: + - main + - dev + paths: + - 'docs/**' + - 'src/**' + - '.github/workflows/docs.yml' + pull_request: + branches: + - main + - dev + paths: + - 'docs/**' + - 'src/**' + - '.github/workflows/docs.yml' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + test-snippets: + name: test-doc-snippets + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.10', '3.11', '3.12'] + fail-fast: false + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e ".[all_extras,test]" + python -m pip install --no-cache-dir -r docs/requirements.txt + + - name: Show dependencies + run: python -m pip list + + - name: Test documentation snippets + run: | + python -m pytest docs/tests/test_doc_snippets.py -v --tb=short + + build-docs: + name: build-docs + runs-on: ubuntu-latest + needs: test-snippets + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e ".[all_extras]" + python -m pip install --no-cache-dir -r docs/requirements.txt + + - name: Show dependencies + run: python -m pip list + + - name: Build documentation + run: | + cd docs && sphinx-build -b html source build/html -W --keep-going + + - name: Run doctest + run: | + cd docs && sphinx-build -b doctest source build/doctest || true + + - name: Upload documentation artifact + uses: actions/upload-artifact@v4 + with: + name: docs-html + path: docs/build/html/ + retention-days: 7 diff --git a/.github/workflows/draft-changelog.yml b/.github/workflows/draft-changelog.yml new file mode 100644 index 00000000..b4e245c7 --- /dev/null +++ b/.github/workflows/draft-changelog.yml @@ -0,0 +1,39 @@ +# ============================================================================= +# Draft Changelog +# 
============================================================================= +# +# TRIGGER: +# When code is merged/pushed to main +# +# OUTPUT: +# A draft release in GitHub Releases section containing: +# - Grouped list of merged PRs (Features, Bug Fixes, etc.) +# - Suggested next version number +# +# DOES NOT: +# - Publish the release (you must click "Publish" manually) +# - Create git tags +# - Upload to PyPI +# +# ============================================================================= + +name: Draft Changelog + +on: + push: + branches: + - main + +permissions: + contents: read + pull-requests: write + +jobs: + update_draft: + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@v6 + with: + config-name: draft-changelog-config.yml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5476c0cf..b8a90b68 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,25 +15,118 @@ concurrency: cancel-in-progress: true jobs: + # =========================================================================== + # STAGE 0: Detect what changed (runs immediately) + # =========================================================================== + detect-changes: + name: detect-changes + runs-on: ubuntu-latest + outputs: + core: ${{ steps.filter.outputs.core }} + sklearn: ${{ steps.filter.outputs.sklearn }} + sktime: ${{ steps.filter.outputs.sktime }} + skpro: ${{ steps.filter.outputs.skpro }} + utils: ${{ steps.filter.outputs.utils }} + docs: ${{ steps.filter.outputs.docs }} + examples: ${{ steps.filter.outputs.examples }} + any_src: ${{ steps.filter.outputs.any_src }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Detect changed paths + id: filter + run: | + if [ "${{ github.event_name }}" == "push" ]; then + BASE_SHA="${{ github.event.before }}" + # Handle initial push where before is all zeros + if [ "$BASE_SHA" == "0000000000000000000000000000000000000000" ]; then + BASE_SHA="HEAD~1" + fi + else + BASE_SHA="${{ github.event.pull_request.base.sha }}" + fi + + CHANGED_FILES=$(git diff --name-only "$BASE_SHA" "${{ github.sha }}" || echo "") + echo "Changed files:" + echo "$CHANGED_FILES" + + # Core: base module, main hyperactive files, or general tests + if echo "$CHANGED_FILES" | grep -qE "^src/hyperactive/(base|tests|__init__|hyperactive)"; then + echo "core=true" >> $GITHUB_OUTPUT + else + echo "core=false" >> $GITHUB_OUTPUT + fi + + # sklearn integration + if echo "$CHANGED_FILES" | grep -q "^src/hyperactive/integrations/sklearn"; then + echo "sklearn=true" >> $GITHUB_OUTPUT + else + echo "sklearn=false" >> $GITHUB_OUTPUT + fi + + # sktime integration + if echo "$CHANGED_FILES" | grep -q "^src/hyperactive/integrations/sktime"; then + echo "sktime=true" >> $GITHUB_OUTPUT + else + echo "sktime=false" >> $GITHUB_OUTPUT + fi + + # skpro integration + if echo "$CHANGED_FILES" | grep -q "^src/hyperactive/integrations/skpro"; then + echo "skpro=true" >> $GITHUB_OUTPUT + else + echo "skpro=false" >> $GITHUB_OUTPUT + fi + + # utils + if echo "$CHANGED_FILES" | grep -q "^src/hyperactive/utils"; then + echo "utils=true" >> $GITHUB_OUTPUT + else + echo "utils=false" >> $GITHUB_OUTPUT + fi + + # docs + if echo "$CHANGED_FILES" | grep -qE "^docs/|\.rst$|\.md$"; then + echo "docs=true" >> $GITHUB_OUTPUT + else + echo "docs=false" >> $GITHUB_OUTPUT + fi + + # examples + if echo "$CHANGED_FILES" | grep -q "^examples/"; then + echo "examples=true" >> $GITHUB_OUTPUT + else 
+ echo "examples=false" >> $GITHUB_OUTPUT + fi + + # any source code + if echo "$CHANGED_FILES" | grep -q "^src/"; then + echo "any_src=true" >> $GITHUB_OUTPUT + else + echo "any_src=false" >> $GITHUB_OUTPUT + fi + + # =========================================================================== + # STAGE 1: Fast checks (code-quality) + # =========================================================================== code-quality: name: code-quality runs-on: ubuntu-latest steps: - - name: repository checkout step - uses: actions/checkout@v5 + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 - - name: python environment step + - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - - name: install pre-commit - run: python3 -m pip install pre-commit - - - name: Checkout code - uses: actions/checkout@v5 - with: - fetch-depth: 0 + - name: Install pre-commit + run: python -m pip install --no-cache-dir pre-commit - name: Get changed files id: changed-files @@ -53,8 +146,162 @@ jobs: echo "No changed files to check." fi + # =========================================================================== + # STAGE 2: Targeted tests (only for changed areas, fast feedback) + # =========================================================================== + targeted-core: + name: targeted-core + needs: [code-quality, detect-changes] + if: needs.detect-changes.outputs.core == 'true' || needs.detect-changes.outputs.any_src == 'false' + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[test] + + - name: Run core tests + run: | + python -m pytest src/hyperactive/base/tests/ src/hyperactive/tests/ -v -p no:warnings + + targeted-sklearn: + name: targeted-sklearn + needs: [code-quality, detect-changes] + if: needs.detect-changes.outputs.sklearn == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[all_extras,test] + + - name: Run sklearn integration tests + run: | + python -m pytest src/hyperactive/integrations/sklearn/tests/ -v -p no:warnings + + targeted-sktime: + name: targeted-sktime + needs: [code-quality, detect-changes] + if: needs.detect-changes.outputs.sktime == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[sktime-integration,test] + + - name: Run sktime integration tests + run: | + python -m pytest src/hyperactive/integrations/sktime/tests/ -v -p no:warnings + + targeted-skpro: + name: targeted-skpro + needs: [code-quality, detect-changes] + if: needs.detect-changes.outputs.skpro == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install 
--no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[sktime-integration,test] + + - name: Run skpro integration tests + run: | + python -m pytest src/hyperactive/integrations/skpro/tests/ -v -p no:warnings + + targeted-utils: + name: targeted-utils + needs: [code-quality, detect-changes] + if: needs.detect-changes.outputs.utils == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[test,test_parallel_backends] + + - name: Run utils tests + run: | + python -m pytest src/hyperactive/utils/tests/ -v -p no:warnings + + # Gate job: passes if all targeted tests pass OR are skipped (not failed) + targeted-tests-gate: + name: targeted-tests-gate + needs: [targeted-core, targeted-sklearn, targeted-sktime, targeted-skpro, targeted-utils] + if: always() + runs-on: ubuntu-latest + steps: + - name: Check targeted test results + run: | + echo "Core: ${{ needs.targeted-core.result }}" + echo "Sklearn: ${{ needs.targeted-sklearn.result }}" + echo "Sktime: ${{ needs.targeted-sktime.result }}" + echo "Skpro: ${{ needs.targeted-skpro.result }}" + echo "Utils: ${{ needs.targeted-utils.result }}" + + # Fail if any targeted test failed (not skipped) + if [[ "${{ needs.targeted-core.result }}" == "failure" ]] || \ + [[ "${{ needs.targeted-sklearn.result }}" == "failure" ]] || \ + [[ "${{ needs.targeted-sktime.result }}" == "failure" ]] || \ + [[ "${{ needs.targeted-skpro.result }}" == "failure" ]] || \ + [[ "${{ needs.targeted-utils.result }}" == "failure" ]]; then + echo "One or more targeted tests failed" + exit 1 + fi + echo "All targeted tests passed or were skipped" + + # =========================================================================== + # STAGE 3: Full test matrix (only after targeted tests pass) + # =========================================================================== test-no-extras: name: test-no-extras + needs: [targeted-tests-gate] + if: always() && needs.targeted-tests-gate.result == 'success' strategy: matrix: python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"] @@ -78,9 +325,8 @@ jobs: - name: Install dependencies run: | - python -m pip install --upgrade pip - python -m pip install build - + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir build make install-no-extras-for-test - name: Show dependencies @@ -92,6 +338,8 @@ jobs: test-all-extras: name: test-all-extras + needs: [targeted-tests-gate] + if: always() && needs.targeted-tests-gate.result == 'success' strategy: matrix: python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"] @@ -115,9 +363,8 @@ jobs: - name: Install dependencies run: | - python -m pip install --upgrade pip - python -m pip install build - + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir build make install-all-extras-for-test - name: Show dependencies @@ -129,6 +376,8 @@ jobs: test-sklearn-versions: name: test-sklearn-${{ matrix.sklearn-version }}-python-${{ matrix.python-version }} + needs: [targeted-tests-gate] + if: always() && needs.targeted-tests-gate.result == 'success' runs-on: ubuntu-latest strategy: @@ -145,20 +394,77 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Install dependencies for scikit-learn ${{ matrix.sklearn-version }} + - name: Install 
dependencies run: | - python -m pip install --upgrade pip - python -m pip install .[all_extras,test] scikit-learn==${{ matrix.sklearn-version }}.* + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[all_extras,test] scikit-learn==${{ matrix.sklearn-version }}.* - name: Show dependencies run: python -m pip list - - name: Run sklearn integration tests for ${{ matrix.sklearn-version }} + - name: Run sklearn integration tests run: | python -m pytest -x -p no:warnings src/hyperactive/integrations/sklearn/ + coverage: + name: coverage + needs: [targeted-tests-gate] + if: always() && needs.targeted-tests-gate.result == 'success' + runs-on: ubuntu-latest + timeout-minutes: 15 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[all_extras,test,test_parallel_backends,sktime-integration] + + - name: Run tests with coverage + run: | + python -m pytest src/hyperactive/ --cov=src/hyperactive --cov-report=xml --cov-report=term -p no:warnings + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + with: + files: ./coverage.xml + fail_ci_if_error: false + verbose: true + + doctest-examples: + name: doctest-examples + needs: [targeted-tests-gate] + if: always() && needs.targeted-tests-gate.result == 'success' + runs-on: ubuntu-latest + timeout-minutes: 20 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir -e .[all_extras,test,sktime-integration] + + - name: Run docstring example tests + run: | + python -m pytest --doctest-modules src/hyperactive/ -v -p no:warnings + test-examples: name: test-examples + needs: [targeted-tests-gate] + if: always() && needs.targeted-tests-gate.result == 'success' runs-on: ubuntu-latest timeout-minutes: 15 @@ -195,8 +501,8 @@ jobs: - name: Install dependencies if: steps.check-examples.outputs.examples_changed == 'true' run: | - python -m pip install --upgrade pip - python -m pip install build + python -m pip install --no-cache-dir --upgrade pip + python -m pip install --no-cache-dir build make install-all-extras-for-test - name: Show dependencies diff --git a/.gitignore b/.gitignore index 5bc0c596..3e7b58f9 100644 --- a/.gitignore +++ b/.gitignore @@ -32,7 +32,6 @@ MANIFEST # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec -requirements*.txt # Installer logs pip-log.txt @@ -75,6 +74,8 @@ instance/ # Sphinx documentation docs/_build/ +docs/build/ +docs/source/api_reference/auto_generated/ # PyBuilder target/ diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 00000000..ef2552cd --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,19 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +python: + install: + - method: pip + path: . 
+ extra_requirements: + - docs + +build: + os: ubuntu-22.04 + tools: + python: "3.12" + +sphinx: + configuration: docs/source/conf.py diff --git a/Makefile b/Makefile index 3801fe58..73cced08 100644 --- a/Makefile +++ b/Makefile @@ -78,19 +78,19 @@ uninstall: rm -fr build dist *.egg-info install-test-requirements: - python -m pip install .[test] + python -m pip install --no-cache-dir .[test] install-build-requirements: - python -m pip install .[build] + python -m pip install --no-cache-dir .[build] install-all-extras: - python -m pip install .[all_extras] + python -m pip install --no-cache-dir .[all_extras] install-no-extras-for-test: - python -m pip install .[test] + python -m pip install --no-cache-dir .[test] install-all-extras-for-test: - python -m pip install .[all_extras,test,test_parallel_backends,sktime-integration] + python -m pip install --no-cache-dir .[all_extras,test,test_parallel_backends,sktime-integration] install-editable: pip install -e . diff --git a/README.md b/README.md index 13dcf1e6..195c37ec 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ package alongside direct interfaces to Optuna and scikit-learn optimizers, suppo --- -| | [Overview](https://github.com/SimonBlanke/Hyperactive#overview) • [Installation](https://github.com/SimonBlanke/Hyperactive#installation) • [Tutorial](https://nbviewer.org/github/SimonBlanke/hyperactive-tutorial/blob/main/notebooks/hyperactive_tutorial.ipynb) • [API reference](https://simonblanke.github.io/hyperactive-documentation/5.0/) • [Citation](https://github.com/SimonBlanke/Hyperactive#citing-hyperactive) | +| | [Overview](https://github.com/SimonBlanke/Hyperactive#overview) • [Installation](https://github.com/SimonBlanke/Hyperactive#installation) • [Tutorial](https://nbviewer.org/github/SimonBlanke/hyperactive-tutorial/blob/main/notebooks/hyperactive_tutorial.ipynb) • [API reference](https://hyperactive.readthedocs.io/en/latest/#) • [Citation](https://github.com/SimonBlanke/Hyperactive#citing-hyperactive) | |---|---| | **Open Source** | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![GC.OS Sponsored](https://img.shields.io/badge/GC.OS-Sponsored%20Project-orange.svg?style=flat&colorA=0eac92&colorB=2077b4)](https://gc-os-ai.github.io/) | | **Community** | [![Discord](https://img.shields.io/static/v1?logo=discord&label=Discord&message=chat&color=lightgreen)](https://discord.gg/7uKdHfdcJG) [![LinkedIn](https://img.shields.io/static/v1?logo=linkedin&label=LinkedIn&message=news&color=lightblue)](https://www.linkedin.com/company/german-center-for-open-source-ai) | diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..aaf9f71a --- /dev/null +++ b/docs/README.md @@ -0,0 +1,92 @@ +# Hyperactive Documentation + +This directory contains the Sphinx-based documentation for Hyperactive. + +## Building the Documentation + +### Prerequisites + +Install the required dependencies: + +```bash +pip install -r requirements.txt +``` + +You'll also need to have Hyperactive installed: + +```bash +pip install -e .. # Install Hyperactive in development mode from parent directory +``` + +### Building HTML Documentation + +From the `source` directory: + +```bash +cd source +make clean # Clean previous builds +make html # Build HTML documentation +``` + +The built documentation will be in `build/html/`. Open `build/html/index.html` in your browser to view. 
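+
+If you prefer to invoke Sphinx directly instead of via `make`, the
+equivalent command from the `docs/` directory (a minimal sketch of what
+the docs CI workflow runs, without its strict `-W` flag) is:
+
+```bash
+# builds the HTML docs into docs/build/html
+sphinx-build -b html source build/html
+```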
+
+### Live Preview with Auto-Rebuild
+
+For development, you can use auto-rebuild mode:
+
+```bash
+cd source
+make autobuild
+```
+
+This will start a local server (typically at http://127.0.0.1:8000) that automatically rebuilds when you make changes to the documentation source files.
+
+## Documentation Structure
+
+- `source/` - Documentation source files
+  - `conf.py` - Sphinx configuration
+  - `index.rst` - Main landing page
+  - `api_reference/` - API reference documentation (auto-generated)
+  - `user_guide/` - User guide pages (currently stubs)
+  - `examples/` - Example notebooks and galleries (currently stubs)
+  - `get_involved/` - Contributing guidelines (currently stubs)
+  - `about/` - About pages (currently stubs)
+  - `_templates/` - Custom Sphinx templates
+  - `_static/` - Static files (CSS, images, etc.)
+- `build/` - Built documentation (generated, not tracked in git)
+
+## Current Status
+
+The documentation is currently set up with:
+
+- ✅ Full API reference auto-generated from docstrings
+- ✅ Sphinx configuration following sktime's approach
+- ✅ pydata_sphinx_theme for a consistent look with the scientific Python ecosystem
+- ✅ Structural placeholders for future static content
+
+Static pages (User Guide, Examples, etc.) are currently placeholder stubs marked "under construction" that can be filled in later.
+
+## Adding New Content
+
+### API Reference
+
+The API reference is automatically generated from docstrings. To update:
+
+1. Ensure your class/function has proper NumPy-style docstrings
+2. Add the class/function to the appropriate `api_reference/*.rst` file using the `autosummary` directive
+3. Rebuild the documentation
+
+### Static Pages
+
+To add content to the placeholder pages:
+
+1. Edit the corresponding `.rst` or `.md` file in the appropriate directory
+2. Remove the "under construction" note
+3. Add your content using reStructuredText or Markdown syntax
+4. Rebuild to see your changes
+
+## Notes
+
+- All API documentation is 100% auto-generated from source code docstrings
+- The structure allows for easy addition of static content in the future
+- Build warnings about missing references are normal during early development
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..3cf5a52e
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,19 @@
+# Requirements for building Hyperactive documentation
+
+# Core Sphinx and extensions
+sphinx>=7.0.0
+sphinx-autobuild
+sphinx-copybutton
+sphinx-design
+sphinx-issues
+myst-parser
+numpydoc
+
+# Theme
+pydata-sphinx-theme
+
+# For intersphinx linking
+# These need to be importable to build docs
+numpy
+pandas
+scikit-learn
diff --git a/docs/source/Makefile b/docs/source/Makefile
new file mode 100644
index 00000000..46aa97ff
--- /dev/null
+++ b/docs/source/Makefile
@@ -0,0 +1,31 @@
+# Minimal makefile for Sphinx documentation
+
+# You can set these variables from the command line.
+SPHINXBUILD = sphinx-build
+SPHINXOPTS = -j auto
+SPHINXAUTOBUILD = sphinx-autobuild
+SPHINXAUTOOPTS = -j auto
+SOURCEDIR = .
+BUILDDIR = ../build
+HYPERACTIVEDIR = ../../src/hyperactive
+
+.PHONY: help clean html autobuild
+
+# "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)"
+
+clean:
+	rm -rf $(BUILDDIR)
+	rm -rf api_reference/auto_generated
+	@echo "Deleted directory $(BUILDDIR) and auto_generated files."
+
+# $(O) is meant as a shortcut for custom options.
+# e.g. to log stderr into a separate file:
+# make html O="--no-color 2> build_warnings.log"
+html:
	$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+# $(O) is meant as a shortcut for custom options.
+autobuild:
	$(SPHINXAUTOBUILD) "$(SOURCEDIR)" "$(BUILDDIR)/html" -d "$(BUILDDIR)/doctrees" $(SPHINXAUTOOPTS) $(O) --watch "$(HYPERACTIVEDIR)" --re-ignore ".*\.json"
diff --git a/docs/source/_snippets/__init__.py b/docs/source/_snippets/__init__.py
new file mode 100644
index 00000000..e69a90c9
--- /dev/null
+++ b/docs/source/_snippets/__init__.py
@@ -0,0 +1,12 @@
+"""Documentation code snippets.
+
+This package contains testable Python code snippets that are included in the
+documentation using Sphinx's ``literalinclude`` directive. Each snippet file
+can be executed directly to verify it works correctly.
+
+The snippets are organized by documentation section:
+- getting_started/: Quick start examples
+- installation/: Installation verification examples
+- user_guide/: In-depth tutorial examples
+- examples/: Gallery examples
+"""
diff --git a/docs/source/_snippets/conftest.py b/docs/source/_snippets/conftest.py
new file mode 100644
index 00000000..0994864b
--- /dev/null
+++ b/docs/source/_snippets/conftest.py
@@ -0,0 +1,46 @@
+"""Pytest configuration for documentation snippets.
+
+This conftest provides shared fixtures that snippet files can use for testing.
+The fixtures ensure consistent behavior across all snippet tests.
+"""
+
+import numpy as np
+import pytest
+
+
+@pytest.fixture
+def simple_search_space():
+    """Simple search space for basic examples."""
+    return {
+        "x": np.arange(-5, 5, 0.1),
+        "y": np.arange(-5, 5, 0.1),
+    }
+
+
+@pytest.fixture
+def simple_objective():
+    """Simple objective function for basic examples."""
+
+    def objective(params):
+        x = params["x"]
+        y = params["y"]
+        return -(x**2 + y**2)
+
+    return objective
+
+
+@pytest.fixture
+def sklearn_data():
+    """Load iris dataset for sklearn examples."""
+    from sklearn.datasets import load_iris
+
+    return load_iris(return_X_y=True)
+
+
+@pytest.fixture
+def sklearn_train_test_split(sklearn_data):
+    """Split sklearn data into train and test sets."""
+    from sklearn.model_selection import train_test_split
+
+    X, y = sklearn_data
+    return train_test_split(X, y, test_size=0.2, random_state=42)
diff --git a/docs/source/_snippets/examples/__init__.py b/docs/source/_snippets/examples/__init__.py
new file mode 100644
index 00000000..581dc357
--- /dev/null
+++ b/docs/source/_snippets/examples/__init__.py
@@ -0,0 +1 @@
+"""Example gallery code snippets for documentation."""
diff --git a/docs/source/_snippets/examples/advanced_examples.py b/docs/source/_snippets/examples/advanced_examples.py
new file mode 100644
index 00000000..8f2627df
--- /dev/null
+++ b/docs/source/_snippets/examples/advanced_examples.py
@@ -0,0 +1,77 @@
+"""Advanced examples for the examples.rst page.
+
+This snippet file contains runnable examples demonstrating Hyperactive's
+advanced functionality like warm starting and optimizer comparison.
+""" + +from sklearn.datasets import load_wine +from sklearn.ensemble import RandomForestClassifier + +from hyperactive.experiment.integrations import SklearnCvExperiment +from hyperactive.opt.gfo import HillClimbing + +# Setup common fixtures for examples +X, y = load_wine(return_X_y=True) +experiment = SklearnCvExperiment( + estimator=RandomForestClassifier(random_state=42), + X=X, + y=y, + cv=3, +) +search_space = { + "n_estimators": list(range(10, 101, 10)), + "max_depth": list(range(1, 11)), + "min_samples_split": list(range(2, 11)), +} + + +# [start:warm_starting] + +# Previous best parameters +warm_start_points = [ + {"n_estimators": 100, "max_depth": 10, "min_samples_split": 5}, +] + +optimizer = HillClimbing( + search_space=search_space, + n_iter=40, + experiment=experiment, + initialize={"warm_start": warm_start_points}, +) +best_params = optimizer.solve() +# [end:warm_starting] + + +# [start:comparing_optimizers] +from hyperactive.opt.gfo import ( + BayesianOptimizer, + HillClimbing, + ParticleSwarmOptimizer, + RandomSearch, +) + +optimizers = { + "HillClimbing": HillClimbing, + "RandomSearch": RandomSearch, + "Bayesian": BayesianOptimizer, + "ParticleSwarm": ParticleSwarmOptimizer, +} + +results = {} +for name, OptClass in optimizers.items(): + optimizer = OptClass( + search_space=search_space, + n_iter=50, + experiment=experiment, + random_state=42, + ) + best = optimizer.solve() + score, _ = experiment.score(best) + results[name] = {"params": best, "score": score} + print(f"{name}: score={score:.4f}") +# [end:comparing_optimizers] + + +if __name__ == "__main__": + print("Advanced examples passed!") + print(f"Best optimizer results: {results}") diff --git a/docs/source/_snippets/examples/basic_examples.py b/docs/source/_snippets/examples/basic_examples.py new file mode 100644 index 00000000..c6bff6ba --- /dev/null +++ b/docs/source/_snippets/examples/basic_examples.py @@ -0,0 +1,73 @@ +"""Basic examples for the examples.rst page. + +This snippet file contains runnable examples demonstrating Hyperactive's +basic functionality including custom function and sklearn optimization. 
+""" + +import numpy as np + +# [start:custom_function] +from hyperactive.opt.gfo import HillClimbing + + +def objective(params): + x = params["x"] + y = params["y"] + return -(x**2 + y**2) # Maximize (minimize the parabola) + + +search_space = { + "x": np.arange(-5, 5, 0.1), + "y": np.arange(-5, 5, 0.1), +} + +optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +best_params = optimizer.solve() +print(f"Best parameters: {best_params}") +# [end:custom_function] + + +# [start:sklearn_tuning] +from sklearn.datasets import load_wine +from sklearn.ensemble import RandomForestClassifier + +from hyperactive.experiment.integrations import SklearnCvExperiment +from hyperactive.opt.gfo import HillClimbing + +# Load data +X, y = load_wine(return_X_y=True) + +# Create experiment +experiment = SklearnCvExperiment( + estimator=RandomForestClassifier(random_state=42), + X=X, + y=y, + cv=3, +) + +# Define search space +search_space = { + "n_estimators": list(range(10, 201)), + "max_depth": list(range(1, 21)), + "min_samples_split": list(range(2, 21)), + "min_samples_leaf": list(range(1, 11)), +} + +# Optimize +optimizer = HillClimbing( + search_space=search_space, + n_iter=40, + random_state=42, + experiment=experiment, +) +best_params = optimizer.solve() +# [end:sklearn_tuning] + + +if __name__ == "__main__": + print("Basic examples passed!") + print(f"Custom function best: {best_params}") diff --git a/docs/source/_snippets/getting_started/__init__.py b/docs/source/_snippets/getting_started/__init__.py new file mode 100644 index 00000000..d3edc1cc --- /dev/null +++ b/docs/source/_snippets/getting_started/__init__.py @@ -0,0 +1 @@ +"""Getting started code snippets for documentation.""" diff --git a/docs/source/_snippets/getting_started/bayesian_optimizer.py b/docs/source/_snippets/getting_started/bayesian_optimizer.py new file mode 100644 index 00000000..db8cf2b0 --- /dev/null +++ b/docs/source/_snippets/getting_started/bayesian_optimizer.py @@ -0,0 +1,41 @@ +"""Bayesian Optimizer example for documentation. + +This snippet demonstrates the usage of BayesianOptimizer for +optimization problems. It is included in get_started.rst. +""" + +# [start:full_example] +# [end:full_example] +# Need to define experiment and search_space for standalone execution +import numpy as np + +from hyperactive.opt.gfo import BayesianOptimizer + + +def experiment(params): + """Simple objective function.""" + x = params["x"] + y = params["y"] + return -(x**2 + y**2) + + +search_space = { + "x": np.arange(-5, 5, 0.1), + "y": np.arange(-5, 5, 0.1), +} + +# [start:optimizer_usage] +optimizer = BayesianOptimizer( + search_space=search_space, + n_iter=30, + experiment=experiment, +) +best_params = optimizer.solve() +# [end:optimizer_usage] + +if __name__ == "__main__": + print(f"Best parameters: {best_params}") + # Verify the optimization found parameters close to (0, 0) + assert abs(best_params["x"]) < 2.0, f"Expected x near 0, got {best_params['x']}" + assert abs(best_params["y"]) < 2.0, f"Expected y near 0, got {best_params['y']}" + print("Bayesian optimizer example passed!") diff --git a/docs/source/_snippets/getting_started/index_bayesian.py b/docs/source/_snippets/getting_started/index_bayesian.py new file mode 100644 index 00000000..98cc7e09 --- /dev/null +++ b/docs/source/_snippets/getting_started/index_bayesian.py @@ -0,0 +1,37 @@ +"""Bayesian optimization example for index page. 
+ +This snippet demonstrates Bayesian optimization with a more complex +objective function shown on the landing page. It is included in index.rst. +""" + +# [start:full_example] +import numpy as np + +from hyperactive.opt.gfo import BayesianOptimizer + + +def complex_objective(params): + x = params["x"] + y = params["y"] + return -((x - 2) ** 2 + (y + 1) ** 2) + np.sin(x * y) + + +search_space = { + "x": np.linspace(-5, 5, 100), + "y": np.linspace(-5, 5, 100), +} + +optimizer = BayesianOptimizer( + search_space=search_space, + n_iter=50, + experiment=complex_objective, +) +best_params = optimizer.solve() +# [end:full_example] + +if __name__ == "__main__": + print(f"Best parameters: {best_params}") + # Verify we got valid parameters + assert "x" in best_params + assert "y" in best_params + print("Index Bayesian example passed!") diff --git a/docs/source/_snippets/getting_started/index_custom_function.py b/docs/source/_snippets/getting_started/index_custom_function.py new file mode 100644 index 00000000..ca7a9e24 --- /dev/null +++ b/docs/source/_snippets/getting_started/index_custom_function.py @@ -0,0 +1,39 @@ +"""Custom function example for index page. + +This snippet demonstrates the basic custom function optimization +shown on the landing page. It is included in index.rst. +""" + +# [start:full_example] +import numpy as np + +from hyperactive.opt.gfo import HillClimbing + + +# Define your objective function +def objective(params): + x, y = params["x"], params["y"] + return -(x**2 + y**2) # Maximize (minimize negative) + + +# Define the search space +search_space = { + "x": np.arange(-5, 5, 0.1), + "y": np.arange(-5, 5, 0.1), +} + +# Create optimizer and solve +optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +best_params = optimizer.solve() +print(f"Best parameters: {best_params}") +# [end:full_example] + +if __name__ == "__main__": + # Verify the optimization found parameters close to (0, 0) + assert abs(best_params["x"]) < 1.0, f"Expected x near 0, got {best_params['x']}" + assert abs(best_params["y"]) < 1.0, f"Expected y near 0, got {best_params['y']}" + print("Index custom function example passed!") diff --git a/docs/source/_snippets/getting_started/index_sklearn_tuning.py b/docs/source/_snippets/getting_started/index_sklearn_tuning.py new file mode 100644 index 00000000..6080c43c --- /dev/null +++ b/docs/source/_snippets/getting_started/index_sklearn_tuning.py @@ -0,0 +1,35 @@ +"""Scikit-learn tuning example for index page. + +This snippet demonstrates sklearn integration using OptCV +shown on the landing page. It is included in index.rst. 
+""" + +# [start:full_example] +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.svm import SVC + +from hyperactive.integrations.sklearn import OptCV +from hyperactive.opt.gfo import HillClimbing + +# Load data +X, y = load_iris(return_X_y=True) +X_train, X_test, y_train, y_test = train_test_split(X, y) + +# Define optimizer with search space +search_space = {"kernel": ["linear", "rbf"], "C": [0.1, 1, 10]} +optimizer = HillClimbing(search_space=search_space, n_iter=20) + +# Create tuned estimator and fit +tuned_svc = OptCV(SVC(), optimizer) +tuned_svc.fit(X_train, y_train) + +print(f"Best params: {tuned_svc.best_params_}") +# [end:full_example] + +if __name__ == "__main__": + # Verify we got valid results + assert hasattr(tuned_svc, "best_params_") + assert "kernel" in tuned_svc.best_params_ + assert "C" in tuned_svc.best_params_ + print("Index sklearn tuning example passed!") diff --git a/docs/source/_snippets/getting_started/quick_start.py b/docs/source/_snippets/getting_started/quick_start.py new file mode 100644 index 00000000..7f342fce --- /dev/null +++ b/docs/source/_snippets/getting_started/quick_start.py @@ -0,0 +1,41 @@ +"""Quick start example for documentation. + +This snippet demonstrates the basic usage of Hyperactive for optimizing +a custom objective function. It is included in get_started.rst. +""" + +# [start:full_example] +import numpy as np + +from hyperactive.opt.gfo import HillClimbing + + +# 1. Define your objective function +def objective(params): + x = params["x"] + y = params["y"] + return -(x**2 + y**2) # Hyperactive maximizes by default + + +# 2. Define the search space +search_space = { + "x": np.arange(-5, 5, 0.1), + "y": np.arange(-5, 5, 0.1), +} + +# 3. Create an optimizer and solve +optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +best_params = optimizer.solve() + +print(f"Best parameters: {best_params}") +# [end:full_example] + +if __name__ == "__main__": + # Verify the optimization found parameters close to (0, 0) + assert abs(best_params["x"]) < 1.0, f"Expected x near 0, got {best_params['x']}" + assert abs(best_params["y"]) < 1.0, f"Expected y near 0, got {best_params['y']}" + print("Quick start example passed!") diff --git a/docs/source/_snippets/getting_started/sklearn_optcv.py b/docs/source/_snippets/getting_started/sklearn_optcv.py new file mode 100644 index 00000000..307ea2b3 --- /dev/null +++ b/docs/source/_snippets/getting_started/sklearn_optcv.py @@ -0,0 +1,41 @@ +"""Scikit-learn OptCV wrapper example for documentation. + +This snippet demonstrates how to use OptCV as a drop-in replacement +for GridSearchCV. It is included in get_started.rst. 
+""" + +# [start:full_example] +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.svm import SVC + +from hyperactive.integrations.sklearn import OptCV +from hyperactive.opt.gfo import HillClimbing + +# Load and split data +X, y = load_iris(return_X_y=True) +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) + +# Define optimizer with search space +search_space = {"kernel": ["linear", "rbf"], "C": [0.1, 1, 10, 100]} +optimizer = HillClimbing(search_space=search_space, n_iter=20) + +# Create tuned estimator (like GridSearchCV) +tuned_svc = OptCV(SVC(), optimizer) + +# Fit and predict as usual +tuned_svc.fit(X_train, y_train) +y_pred = tuned_svc.predict(X_test) + +# Access results +print(f"Best params: {tuned_svc.best_params_}") +print(f"Best estimator: {tuned_svc.best_estimator_}") +# [end:full_example] + +if __name__ == "__main__": + # Verify we got valid results + assert hasattr(tuned_svc, "best_params_") + assert hasattr(tuned_svc, "best_estimator_") + assert "kernel" in tuned_svc.best_params_ + assert "C" in tuned_svc.best_params_ + print("Sklearn OptCV example passed!") diff --git a/docs/source/_snippets/getting_started/sklearn_random_forest.py b/docs/source/_snippets/getting_started/sklearn_random_forest.py new file mode 100644 index 00000000..10d0d1db --- /dev/null +++ b/docs/source/_snippets/getting_started/sklearn_random_forest.py @@ -0,0 +1,48 @@ +"""Scikit-learn RandomForest example for documentation. + +This snippet demonstrates how to optimize a RandomForest classifier +using Hyperactive's SklearnCvExperiment. It is included in get_started.rst. +""" + +# [start:full_example] +from sklearn.datasets import load_iris +from sklearn.ensemble import RandomForestClassifier + +from hyperactive.experiment.integrations import SklearnCvExperiment +from hyperactive.opt.gfo import HillClimbing + +# Load data +X, y = load_iris(return_X_y=True) + +# Create an experiment that handles cross-validation +experiment = SklearnCvExperiment( + estimator=RandomForestClassifier(random_state=42), + X=X, + y=y, + cv=5, +) + +# Define hyperparameter search space +search_space = { + "n_estimators": list(range(10, 200, 10)), + "max_depth": list(range(1, 20)), + "min_samples_split": list(range(2, 10)), +} + +# Optimize +optimizer = HillClimbing( + search_space=search_space, + n_iter=50, + experiment=experiment, +) +best_params = optimizer.solve() + +print(f"Best hyperparameters: {best_params}") +# [end:full_example] + +if __name__ == "__main__": + # Verify we got valid hyperparameters + assert "n_estimators" in best_params + assert "max_depth" in best_params + assert "min_samples_split" in best_params + print("Sklearn RandomForest example passed!") diff --git a/docs/source/_snippets/installation/__init__.py b/docs/source/_snippets/installation/__init__.py new file mode 100644 index 00000000..3cee52a4 --- /dev/null +++ b/docs/source/_snippets/installation/__init__.py @@ -0,0 +1 @@ +"""Installation code snippets for documentation.""" diff --git a/docs/source/_snippets/installation/verify_installation.py b/docs/source/_snippets/installation/verify_installation.py new file mode 100644 index 00000000..5a97971c --- /dev/null +++ b/docs/source/_snippets/installation/verify_installation.py @@ -0,0 +1,32 @@ +"""Installation verification snippet. + +This snippet demonstrates how to verify Hyperactive installation. 
+""" + +# [start:verify_installation] +import hyperactive + +print(f"Hyperactive version: {hyperactive.__version__}") + +# Quick test +import numpy as np + +from hyperactive.opt.gfo import HillClimbing + + +def objective(params): + return -(params["x"] ** 2) + + +optimizer = HillClimbing( + search_space={"x": np.arange(-5, 5, 0.1)}, + n_iter=10, + experiment=objective, +) +best = optimizer.solve() +print(f"Test optimization successful: {best}") +# [end:verify_installation] + + +if __name__ == "__main__": + print("Installation verification passed!") diff --git a/docs/source/_snippets/user_guide/__init__.py b/docs/source/_snippets/user_guide/__init__.py new file mode 100644 index 00000000..41675fba --- /dev/null +++ b/docs/source/_snippets/user_guide/__init__.py @@ -0,0 +1 @@ +"""User guide code snippets for documentation.""" diff --git a/docs/source/_snippets/user_guide/experiments.py b/docs/source/_snippets/user_guide/experiments.py new file mode 100644 index 00000000..be0c09b6 --- /dev/null +++ b/docs/source/_snippets/user_guide/experiments.py @@ -0,0 +1,231 @@ +"""Experiments page code snippets for documentation. + +This snippet file contains examples from the experiments.rst page covering +custom objectives, built-in experiments, and benchmarks. +""" + +import numpy as np + + +# [start:simple_objective] +def objective(params): + x = params["x"] + y = params["y"] + # Hyperactive MAXIMIZES this score + return -(x**2 + y**2) + + +# [end:simple_objective] + + +# [start:ackley_function] +from hyperactive.opt.gfo import BayesianOptimizer + + +# Ackley function (a common benchmark) +def ackley(params): + x = params["x"] + y = params["y"] + + term1 = -20 * np.exp(-0.2 * np.sqrt(0.5 * (x**2 + y**2))) + term2 = -np.exp(0.5 * (np.cos(2 * np.pi * x) + np.cos(2 * np.pi * y))) + result = term1 + term2 + np.e + 20 + + return -result # Negate to maximize (minimize the Ackley function) + + +search_space = { + "x": np.linspace(-5, 5, 100), + "y": np.linspace(-5, 5, 100), +} + +optimizer = BayesianOptimizer( + search_space=search_space, + n_iter=50, + experiment=ackley, +) +best_params = optimizer.solve() +# [end:ackley_function] + + +# [start:external_simulation] +import subprocess + + +def run_simulation(params): + # Run an external simulation with the given parameters + result = subprocess.run( + ["./my_simulation", str(params["param1"]), str(params["param2"])], + capture_output=True, + text=True, + ) + # Parse the output and return the score + score = float(result.stdout.strip()) + return score + + +# [end:external_simulation] + + +# [start:sklearn_cv_experiment] +from sklearn.datasets import load_iris +from sklearn.ensemble import RandomForestClassifier +from sklearn.metrics import accuracy_score +from sklearn.model_selection import KFold + +from hyperactive.experiment.integrations import SklearnCvExperiment +from hyperactive.opt.gfo import HillClimbing + +X, y = load_iris(return_X_y=True) + +experiment = SklearnCvExperiment( + estimator=RandomForestClassifier(random_state=42), + X=X, + y=y, + cv=KFold(n_splits=5, shuffle=True, random_state=42), + scoring=accuracy_score, # Optional: defaults to estimator's score method +) + +search_space = { + "n_estimators": list(range(10, 200, 10)), + "max_depth": list(range(1, 20)), + "min_samples_split": list(range(2, 10)), +} + +optimizer = HillClimbing( + search_space=search_space, + n_iter=30, + experiment=experiment, +) +best_params = optimizer.solve() +# [end:sklearn_cv_experiment] + + +# [start:sktime_forecasting] +from sktime.datasets import load_airline 
+from sktime.forecasting.naive import NaiveForecaster
+
+from hyperactive.experiment.integrations import SktimeForecastingExperiment
+from hyperactive.opt.gfo import RandomSearch
+
+y = load_airline()
+
+experiment = SktimeForecastingExperiment(
+    estimator=NaiveForecaster(),
+    y=y,
+    fh=[1, 2, 3],  # Forecast horizon
+)
+
+search_space = {
+    "strategy": ["mean", "last", "drift"],
+}
+
+optimizer = RandomSearch(
+    search_space=search_space,
+    n_iter=10,
+    experiment=experiment,
+)
+best_params = optimizer.solve()
+# [end:sktime_forecasting]
+
+
+# Stand-ins so this file stays importable: the Torch snippet below is
+# illustrative, and these names are placeholders for a real
+# LightningModule subclass and DataModule.
+class MyLightningModel:
+    pass
+
+
+my_datamodule = None
+
+
+# [start:torch_experiment]
+from hyperactive.experiment.integrations import TorchExperiment
+
+experiment = TorchExperiment(
+    model_class=MyLightningModel,
+    datamodule=my_datamodule,
+    trainer_kwargs={"max_epochs": 10},
+)
+# [end:torch_experiment]
+
+
+# [start:benchmark_experiments]
+from hyperactive.experiment.bench import Ackley
+
+# Use the benchmark as the experiment; named ackley_bench so it does not
+# shadow the ackley function defined earlier in this file
+ackley_bench = Ackley(dim=2)
+
+optimizer = BayesianOptimizer(
+    search_space=ackley_bench.search_space,
+    n_iter=50,
+    experiment=ackley_bench,
+)
+# [end:benchmark_experiments]
+
+
+# Reload the tabular data: ``y`` was reassigned to the airline series in
+# the forecasting snippet above and no longer matches the iris ``X``.
+X, y = load_iris(return_X_y=True)
+
+# [start:score_method]
+from hyperactive.experiment.integrations import SklearnCvExperiment
+
+experiment = SklearnCvExperiment(
+    estimator=RandomForestClassifier(),
+    X=X,
+    y=y,
+    cv=5,
+)
+
+# Evaluate specific parameters
+params = {"n_estimators": 100, "max_depth": 10}
+score, additional_info = experiment.score(params)
+
+print(f"Score: {score}")
+print(f"Additional info: {additional_info}")
+# [end:score_method]
+
+
+# [start:robust_objective]
+def robust_objective(params):
+    try:
+        score = compute_score(params)  # your own evaluation routine
+        return score
+    except Exception:
+        return -np.inf  # Return bad score on failure
+
+
+# [end:robust_objective]
+
+
+# --- Runnable test code below ---
+if __name__ == "__main__":
+    # Test simple objective
+    params = {"x": 0.0, "y": 0.0}
+    score = objective(params)
+    assert score == 0.0, f"Expected 0.0, got {score}"
+
+    # Test Ackley function
+    params = {"x": 0.0, "y": 0.0}
+    ackley_score = ackley(params)
+    # Ackley minimum is at (0,0) with value 0
+    assert abs(ackley_score) < 0.01, f"Expected ~0, got {ackley_score}"
+
+    # Test sklearn CV experiment
+    from sklearn.datasets import load_iris
+    from sklearn.ensemble import RandomForestClassifier
+
+    from hyperactive.experiment.integrations import SklearnCvExperiment
+    from hyperactive.opt.gfo import HillClimbing
+
+    X, y = load_iris(return_X_y=True)
+    experiment = SklearnCvExperiment(
+        estimator=RandomForestClassifier(random_state=42),
+        X=X,
+        y=y,
+        cv=3,
+    )
+
+    search_space = {
+        "n_estimators": [10, 50, 100],
+        "max_depth": [3, 5, 10],
+    }
+
+    optimizer = HillClimbing(
+        search_space=search_space,
+        n_iter=5,
+        experiment=experiment,
+        random_state=42,
+    )
+    best_params = optimizer.solve()
+    assert "n_estimators" in best_params
+    assert "max_depth" in best_params
+
+    print("Experiments snippets passed!")
diff --git a/docs/source/_snippets/user_guide/integrations.py b/docs/source/_snippets/user_guide/integrations.py
new file mode 100644
index 00000000..4d83bcf5
--- /dev/null
+++ b/docs/source/_snippets/user_guide/integrations.py
@@ -0,0 +1,277 @@
+"""Integrations page code snippets for documentation.
+
+This snippet file contains examples from the integrations.rst page covering
+sklearn, sktime, skpro, and PyTorch integrations.
+""" + +# [start:optcv_basic] +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.svm import SVC + +from hyperactive.integrations.sklearn import OptCV +from hyperactive.opt.gfo import HillClimbing + +# Load data +X, y = load_iris(return_X_y=True) +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) + +# Define search space and optimizer +search_space = {"kernel": ["linear", "rbf"], "C": [0.1, 1, 10, 100]} +optimizer = HillClimbing(search_space=search_space, n_iter=20) + +# Create tuned estimator +tuned_svc = OptCV(SVC(), optimizer) + +# Fit like any sklearn estimator +tuned_svc.fit(X_train, y_train) + +# Predict +y_pred = tuned_svc.predict(X_test) + +# Access results +print(f"Best parameters: {tuned_svc.best_params_}") +print(f"Best estimator: {tuned_svc.best_estimator_}") +# [end:optcv_basic] + + +# [start:different_optimizers] +from hyperactive.opt import GridSearchSk as GridSearch +from hyperactive.opt.gfo import BayesianOptimizer, GeneticAlgorithm +from hyperactive.opt.optuna import TPEOptimizer + +# Grid Search (exhaustive) +optimizer = GridSearch(search_space) +tuned_model = OptCV(SVC(), optimizer) + +# Bayesian Optimization (smart sampling) +optimizer = BayesianOptimizer(search_space=search_space, n_iter=30) +tuned_model = OptCV(SVC(), optimizer) + +# Genetic Algorithm (population-based) +optimizer = GeneticAlgorithm(search_space=search_space, n_iter=50) +tuned_model = OptCV(SVC(), optimizer) + +# Optuna TPE +optimizer = TPEOptimizer(search_space=search_space, n_iter=30) +tuned_model = OptCV(SVC(), optimizer) +# [end:different_optimizers] + + +# [start:pipeline_integration] +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC + +# Create pipeline +pipe = Pipeline( + [ + ("scaler", StandardScaler()), + ("svc", SVC()), + ] +) + +# Search space with pipeline parameter naming +search_space = { + "svc__kernel": ["linear", "rbf"], + "svc__C": [0.1, 1, 10], +} + +optimizer = HillClimbing(search_space=search_space, n_iter=20) +tuned_pipe = OptCV(pipe, optimizer) +tuned_pipe.fit(X_train, y_train) +# [end:pipeline_integration] + + +# [start:forecasting_optcv] +from sktime.datasets import load_airline +from sktime.forecasting.naive import NaiveForecaster +from sktime.split import ExpandingWindowSplitter, temporal_train_test_split + +from hyperactive.integrations.sktime import ForecastingOptCV +from hyperactive.opt import GridSearchSk as GridSearch + +# Load time series data +y = load_airline() +y_train, y_test = temporal_train_test_split(y, test_size=12) + +# Define search space +param_grid = {"strategy": ["mean", "last", "drift"]} + +# Create tuned forecaster +tuned_forecaster = ForecastingOptCV( + NaiveForecaster(), + GridSearch(param_grid), + cv=ExpandingWindowSplitter( + initial_window=12, + step_length=3, + fh=range(1, 13), + ), +) + +# Fit and predict +tuned_forecaster.fit(y_train, fh=range(1, 13)) +y_pred = tuned_forecaster.predict() + +# Access results +print(f"Best parameters: {tuned_forecaster.best_params_}") +print(f"Best forecaster: {tuned_forecaster.best_forecaster_}") +# [end:forecasting_optcv] + + +# [start:tsc_optcv] +from sklearn.model_selection import KFold +from sktime.classification.dummy import DummyClassifier +from sktime.datasets import load_unit_test + +from hyperactive.integrations.sktime import TSCOptCV +from hyperactive.opt import GridSearchSk as GridSearch + +# Load time series classification data +X_train, y_train = 
load_unit_test(
+    return_X_y=True,
+    split="TRAIN",
+    return_type="pd-multiindex",
+)
+X_test, _ = load_unit_test(
+    return_X_y=True,
+    split="TEST",
+    return_type="pd-multiindex",
+)
+
+# Define search space
+param_grid = {"strategy": ["most_frequent", "stratified"]}
+
+# Create tuned classifier
+tuned_classifier = TSCOptCV(
+    DummyClassifier(),
+    GridSearch(param_grid),
+    cv=KFold(n_splits=2, shuffle=False),
+)
+
+# Fit and predict
+tuned_classifier.fit(X_train, y_train)
+y_pred = tuned_classifier.predict(X_test)
+
+# Access results
+print(f"Best parameters: {tuned_classifier.best_params_}")
+# [end:tsc_optcv]
+
+
+# NOTE: the skpro and PyTorch blocks below are illustrative:
+# YourSkproEstimator and my_datamodule are stand-in names to replace with
+# real objects before running this section.
+# [start:skpro_experiment]
+from hyperactive.experiment.integrations import SkproProbaRegExperiment
+from hyperactive.opt.gfo import HillClimbing
+
+experiment = SkproProbaRegExperiment(
+    estimator=YourSkproEstimator(),
+    X=X,
+    y=y,
+    cv=5,
+)
+
+optimizer = HillClimbing(
+    search_space=search_space,
+    n_iter=30,
+    experiment=experiment,
+)
+best_params = optimizer.solve()
+# [end:skpro_experiment]
+
+
+# [start:pytorch_lightning]
+import lightning as L
+import torch  # needed by configure_optimizers below
+
+from hyperactive.experiment.integrations import TorchExperiment
+from hyperactive.opt.gfo import BayesianOptimizer
+
+
+# Define your Lightning module
+class MyModel(L.LightningModule):
+    def __init__(self, learning_rate=0.001, hidden_size=64):
+        super().__init__()
+        self.learning_rate = learning_rate
+        self.hidden_size = hidden_size
+        # ... model definition
+
+    def training_step(self, batch, batch_idx):
+        # ... training logic
+        pass
+
+    def configure_optimizers(self):
+        return torch.optim.Adam(self.parameters(), lr=self.learning_rate)
+
+
+# Create experiment
+experiment = TorchExperiment(
+    model_class=MyModel,
+    datamodule=my_datamodule,
+    trainer_kwargs={
+        "max_epochs": 10,
+        "accelerator": "auto",
+    },
+)
+
+# Define search space
+search_space = {
+    "learning_rate": [0.0001, 0.001, 0.01],
+    "hidden_size": [32, 64, 128, 256],
+}
+
+# Optimize
+optimizer = BayesianOptimizer(
+    search_space=search_space,
+    n_iter=20,
+    experiment=experiment,
+)
+best_params = optimizer.solve()
+# [end:pytorch_lightning]
+
+
+# --- Runnable test code below ---
+if __name__ == "__main__":
+    from sklearn.datasets import load_iris
+    from sklearn.model_selection import train_test_split
+    from sklearn.pipeline import Pipeline
+    from sklearn.preprocessing import StandardScaler
+    from sklearn.svm import SVC
+
+    from hyperactive.integrations.sklearn import OptCV
+    from hyperactive.opt.gfo import HillClimbing
+
+    # Test OptCV basic usage
+    X, y = load_iris(return_X_y=True)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.2, random_state=42
+    )
+
+    search_space = {"kernel": ["linear", "rbf"], "C": [0.1, 1, 10]}
+    optimizer = HillClimbing(search_space=search_space, n_iter=10)
+    tuned_svc = OptCV(SVC(), optimizer)
+    tuned_svc.fit(X_train, y_train)
+    y_pred = tuned_svc.predict(X_test)
+
+    assert hasattr(tuned_svc, "best_params_")
+    assert hasattr(tuned_svc, "best_estimator_")
+    assert "kernel" in tuned_svc.best_params_
+    assert "C" in tuned_svc.best_params_
+
+    # Test pipeline integration
+    pipe = Pipeline(
+        [
+            ("scaler", StandardScaler()),
+            ("svc", SVC()),
+        ]
+    )
+
+    search_space = {
+        "svc__kernel": ["linear", "rbf"],
+        "svc__C": [0.1, 1],
+    }
+
+    optimizer = HillClimbing(search_space=search_space, n_iter=5)
+    tuned_pipe = OptCV(pipe, optimizer)
+    tuned_pipe.fit(X_train, y_train)
+
+    assert hasattr(tuned_pipe, "best_params_")
+
+    print("Integrations snippets passed!")
diff --git 
diff --git a/docs/source/_snippets/user_guide/introduction.py b/docs/source/_snippets/user_guide/introduction.py new file mode 100644 index 00000000..17b2376b --- /dev/null +++ b/docs/source/_snippets/user_guide/introduction.py @@ -0,0 +1,124 @@ +"""Introduction page code snippets for documentation. + +This snippet file contains examples from the introduction.rst page. +Some snippets are illustrative (showing patterns) while others are runnable. +""" + +import numpy as np + +# Define placeholders for illustrative code +# These allow the file to be imported without errors +X_train = np.array([[1, 2], [3, 4], [5, 6]]) +y_train = np.array([0, 1, 0]) +X = X_train +y = y_train + + +# [start:simple_objective] +def objective(params): + x = params["x"] + y = params["y"] + # Return a score to maximize + return -(x**2 + y**2) + + +# [end:simple_objective] + + +# [start:sklearn_experiment_intro] +from sklearn.ensemble import RandomForestClassifier + +from hyperactive.experiment.integrations import SklearnCvExperiment + +experiment = SklearnCvExperiment( + estimator=RandomForestClassifier(), + X=X_train, + y=y_train, + cv=5, +) +# [end:sklearn_experiment_intro] + + +# [start:optimizer_imports] +# [end:optimizer_imports] +# [start:search_space_definition] +import numpy as np + +from hyperactive.opt.gfo import ( + HillClimbing, # Local search +) + +search_space = { + # Discrete integer values + "n_estimators": list(range(10, 200, 10)), + # Continuous values (discretized) + "learning_rate": np.logspace(-4, 0, 20), + # Categorical values + "kernel": ["linear", "rbf", "poly"], +} +# [end:search_space_definition] + + +# Placeholder so the illustrative snippet below stays importable; any sklearn +# estimator class works here +YourEstimator = RandomForestClassifier + +# [start:workflow_experiment_options] +# Option A: Custom function +def my_objective(params): + # Your evaluation logic here + return score + + +# Option B: Built-in sklearn experiment +from hyperactive.experiment.integrations import SklearnCvExperiment + +experiment = SklearnCvExperiment( + estimator=YourEstimator(), + X=X, + y=y, + cv=5, +) +# [end:workflow_experiment_options] + + +# [start:workflow_search_space] +search_space = { + "param1": [1, 2, 3, 4, 5], + "param2": np.linspace(0.1, 1.0, 10), + "param3": ["option_a", "option_b"], +} +# [end:workflow_search_space] + + +# [start:workflow_optimizer] + +optimizer = HillClimbing( + search_space=search_space, + n_iter=100, # Number of iterations + experiment=experiment, + random_state=42, # For reproducibility +) +# [end:workflow_optimizer] + + +# [start:workflow_solve] +best_params = optimizer.solve() +print(f"Best parameters: {best_params}") +# [end:workflow_solve] + + +# [start:warm_starting] +warm_start = [ + {"n_estimators": 100, "max_depth": 10}, # Start from known good point +] + +optimizer = HillClimbing( + search_space=search_space, + n_iter=50, + experiment=experiment, + initialize={"warm_start": warm_start}, +) +# [end:warm_starting] + + +if __name__ == "__main__": + # The actual test code runs here + print("Introduction snippet file is importable!") + print("Full integration test runs in test_doc_snippets.py") diff --git a/docs/source/_snippets/user_guide/optimizers.py b/docs/source/_snippets/user_guide/optimizers.py new file mode 100644 index 00000000..181da4e5 --- /dev/null +++ b/docs/source/_snippets/user_guide/optimizers.py @@ -0,0 +1,324 @@ +"""Optimizers page code snippets for documentation. + +This snippet file contains examples from the optimizers.rst page covering +all optimizer categories and configurations.
+""" + +import numpy as np + +# Define common test fixtures +search_space = { + "x": np.arange(-5, 5, 0.5), + "y": np.arange(-5, 5, 0.5), +} + + +def objective(params): + x = params["x"] + y = params["y"] + return -(x**2 + y**2) + + +# ============================================================================ +# Local Search Optimizers +# ============================================================================ + +# [start:hill_climbing] +from hyperactive.opt.gfo import HillClimbing + +optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:hill_climbing] + + +# [start:simulated_annealing] +from hyperactive.opt.gfo import SimulatedAnnealing + +optimizer = SimulatedAnnealing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:simulated_annealing] + + +# [start:repulsing_hill_climbing] +from hyperactive.opt.gfo import RepulsingHillClimbing + +optimizer = RepulsingHillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:repulsing_hill_climbing] + + +# [start:stochastic_hill_climbing] +from hyperactive.opt.gfo import StochasticHillClimbing + +optimizer = StochasticHillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, + p_accept=0.3, # Probability of accepting worse solutions +) +# [end:stochastic_hill_climbing] + + +# [start:downhill_simplex] +from hyperactive.opt.gfo import DownhillSimplexOptimizer + +optimizer = DownhillSimplexOptimizer( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:downhill_simplex] + + +# ============================================================================ +# Global Search Optimizers +# ============================================================================ + +# [start:random_search] +from hyperactive.opt.gfo import RandomSearch + +optimizer = RandomSearch( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:random_search] + + +# [start:grid_search] +from hyperactive.opt.gfo import GridSearch + +optimizer = GridSearch( + search_space=search_space, + experiment=objective, +) +# [end:grid_search] + + +# [start:random_restart_hill_climbing] +from hyperactive.opt.gfo import RandomRestartHillClimbing + +optimizer = RandomRestartHillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:random_restart_hill_climbing] + + +# [start:powells_pattern] +# [end:powells_pattern] + + +# ============================================================================ +# Population Methods +# ============================================================================ + +# [start:particle_swarm] +from hyperactive.opt.gfo import ParticleSwarmOptimizer + +optimizer = ParticleSwarmOptimizer( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:particle_swarm] + + +# [start:genetic_algorithm] +from hyperactive.opt.gfo import GeneticAlgorithm + +optimizer = GeneticAlgorithm( + search_space=search_space, + n_iter=100, + experiment=objective, +) +# [end:genetic_algorithm] + + +# [start:evolution_strategy] +# [end:evolution_strategy] + + +# [start:differential_evolution] +# [end:differential_evolution] + + +# [start:parallel_tempering] +# [end:parallel_tempering] + + +# [start:spiral_optimization] +# [end:spiral_optimization] + + +# ============================================================================ +# Sequential Model-Based (Bayesian) +# ============================================================================ + +# 
[start:bayesian_optimizer] +from hyperactive.opt.gfo import BayesianOptimizer + +optimizer = BayesianOptimizer( + search_space=search_space, + n_iter=50, + experiment=objective, +) +# [end:bayesian_optimizer] + + +# [start:tpe] +# [end:tpe] + + +# [start:forest_optimizer] +# [end:forest_optimizer] + + +# [start:lipschitz_direct] +# [end:lipschitz_direct] + + +# ============================================================================ +# Optuna Backend +# ============================================================================ + +# [start:optuna_imports] +from hyperactive.opt.optuna import ( + TPEOptimizer, # Tree-Parzen Estimators +) + +# [end:optuna_imports] + + +# [start:optuna_tpe] + +optimizer = TPEOptimizer( + search_space=search_space, + n_iter=50, + experiment=objective, +) +# [end:optuna_tpe] + + +# ============================================================================ +# Configuration Examples +# ============================================================================ + +# Placeholder alias so the illustrative snippet below parses and runs +SomeOptimizer = HillClimbing + +# [start:common_parameters] +optimizer = SomeOptimizer( + search_space=search_space, # Required: parameter ranges + n_iter=100, # Required: number of iterations + experiment=objective, # Required: objective function + random_state=42, # Optional: for reproducibility + initialize={ # Optional: initialization settings + "warm_start": [...], # Starting points + "grid": 4, # Grid initialization points + "random": 2, # Random initialization points + "vertices": 4, # Vertex initialization points + }, +) +# [end:common_parameters] + + +# [start:warm_start_example] +# Start from known good points +optimizer = HillClimbing( + search_space=search_space, + n_iter=50, + experiment=objective, + initialize={ + "warm_start": [ + {"param1": 10, "param2": 0.5}, + {"param1": 20, "param2": 0.3}, + ] + }, +) +# [end:warm_start_example] + + +# [start:initialization_strategies] +# Mix of initialization strategies +optimizer = ParticleSwarmOptimizer( + search_space=search_space, + n_iter=100, + experiment=objective, + initialize={ + "grid": 4, # 4 points on a grid + "random": 6, # 6 random points + "vertices": 4, # 4 corner points + }, +) +# [end:initialization_strategies] + + +# [start:simulated_annealing_config] +from hyperactive.opt.gfo import SimulatedAnnealing + +optimizer = SimulatedAnnealing( + search_space=search_space, + n_iter=100, + experiment=objective, + # Algorithm-specific parameters + # (check API reference for available options) +) +# [end:simulated_annealing_config] + + +# --- Runnable test code below --- +if __name__ == "__main__": + from hyperactive.opt.gfo import ( + BayesianOptimizer, + GeneticAlgorithm, + HillClimbing, + ParticleSwarmOptimizer, + RandomSearch, + SimulatedAnnealing, + ) + + search_space = { + "x": np.arange(-5, 5, 0.5), + "y": np.arange(-5, 5, 0.5), + } + + def objective(params): + x = params["x"] + y = params["y"] + return -(x**2 + y**2) + + # Test a few optimizers + optimizers_to_test = [ + ("HillClimbing", HillClimbing), + ("SimulatedAnnealing", SimulatedAnnealing), + ("RandomSearch", RandomSearch), + ("BayesianOptimizer", BayesianOptimizer), + ] + + for name, OptimizerClass in optimizers_to_test: + if name == "BayesianOptimizer": + optimizer = OptimizerClass( + search_space=search_space, + n_iter=10, + experiment=objective, + ) + else: + optimizer = OptimizerClass( + search_space=search_space, + n_iter=20, + experiment=objective, + ) + best_params = optimizer.solve() + assert "x" in best_params + assert "y" in best_params + print(f"{name} passed!") + + print("All optimizer
snippets passed!") diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css new file mode 100644 index 00000000..89cf8ca3 --- /dev/null +++ b/docs/source/_static/css/custom.css @@ -0,0 +1,1300 @@ +/* Custom CSS for Hyperactive documentation */ +/* Purple/Violet theme matching the Hyperactive logo */ + +/* ============================================ + Google Font Import for Hero Title + ============================================ */ +@import url('https://fonts.googleapis.com/css2?family=Raleway:wght@300;400;600;700&display=swap'); + +/* ============================================ + PyData Theme Color Overrides (purple theme) + ============================================ */ +html[data-theme="light"] { + --pst-color-primary: #5D5D7A; + --pst-color-primary-bg: #f0f0f8; + --pst-color-secondary: #7070A0; + --pst-color-accent: #8080B0; + --pst-color-info: #7070A0; + --pst-color-link: #5D5D7A; + --pst-color-link-hover: #4A4A65; + --pst-color-target: rgba(125, 125, 170, 0.2); +} + +html[data-theme="dark"] { + --pst-color-primary: #9090C0; + --pst-color-primary-bg: #2a2a3a; + --pst-color-secondary: #A0A0D0; + --pst-color-accent: #B0B0E0; + --pst-color-info: #9090C0; + --pst-color-link: #A0A0D0; + --pst-color-link-hover: #B0B0E0; + --pst-color-target: rgba(160, 160, 208, 0.2); + --separator-color: #3d3d4d; +} + +/* ============================================ + CSS Variables for consistent theming + ============================================ */ +:root { + /* Purple/violet colors from the Hyperactive logo */ + --hyperactive-primary: #5D5D7A; + --hyperactive-secondary: #7070A0; + --hyperactive-accent: #8080B0; + --hyperactive-dark: #4A4A65; + --hyperactive-light: #f8f9fa; + --hyperactive-gradient: linear-gradient(135deg, #3D3D55 0%, #5858A0 100%); + --hyperactive-gradient-dark: linear-gradient(135deg, #2D2D45 0%, #484890 100%); + --card-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06); + --card-shadow-hover: 0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04); + /* Neutral separator color */ + --separator-color: #dee2e6; + + /* Backend brand colors - from official logos */ + --gfo-light: #80b4ff; + --gfo-dark: #000054; + --gfo-grey: #555555; + --optuna-dark: #123692; + --optuna-light: #3b82c4; + --sklearn-orange: #F7931E; + --sklearn-blue: #29ABE2; + + /* Integration brand colors */ + --sktime-blue: #0075b7; + --sktime-teal: #00a890; + --skpro-blue: #0076b7; + --skpro-teal: #01ab90; + --pytorch-orange: #ee4c2c; + --pytorch-yellow: #eaa700; +} + +/* ============================================ + Top Navbar Styling + ============================================ */ +.bd-header.navbar { + padding-top: 0 !important; + padding-bottom: 0 !important; + min-height: auto !important; +} + +.bd-header .navbar-header-items { + flex-wrap: nowrap !important; +} + +.bd-header .navbar-header-items__center { + flex-wrap: nowrap !important; +} + +.bd-header .navbar-header-items__start { + padding-top: 0 !important; + padding-bottom: 0 !important; +} + +.navbar-brand { + padding: 0.25rem 0 !important; + margin-right: 0.75rem !important; + flex-shrink: 0 !important; +} + +.navbar-brand .logo__image { + height: 36px !important; + width: auto !important; + max-height: 36px !important; + display: inline-block !important; + visibility: visible !important; + opacity: 1 !important; +} + +.navbar-brand p, +.navbar-brand .logo__title { + display: none !important; +} + +.bd-navbar .navbar-nav { + gap: 0.1rem; + flex-wrap: nowrap !important; +} + 
+.bd-navbar .nav-link { + padding: 0.4rem 0.5rem !important; + white-space: nowrap; + font-size: 0.85rem; +} + +.bd-navbar .dropdown-toggle { + padding: 0.4rem 0.5rem !important; + font-size: 0.85rem; +} + +/* ============================================ + Right Sidebar - On This Page spacing + ============================================ */ +.bd-toc-nav.page-toc { + margin-top: 1.25rem; +} + +.pst-page-navigation-heading { + margin-bottom: 0.5rem; +} + +/* ============================================ + Hero Section + ============================================ */ +.hero-section { + background: var(--hyperactive-gradient); + padding: 3rem 2rem; + margin: -1rem -1rem 2rem -1rem; + border-radius: 0 0 20px 20px; + text-align: center; + position: relative; + overflow: hidden; +} + +.hero-section::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: + radial-gradient(circle at 20% 80%, rgba(255, 255, 255, 0.1) 0%, transparent 50%), + radial-gradient(circle at 80% 20%, rgba(255, 255, 255, 0.1) 0%, transparent 50%); + pointer-events: none; +} + +.hero-content { + position: relative; + z-index: 1; +} + +.hero-title { + font-family: 'Raleway', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + font-size: 3.5rem; + font-weight: 300; + color: #ffffff; + margin: 0 0 3.5rem 0; + letter-spacing: 0.25em; + text-transform: uppercase; + text-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); +} + +.hero-tagline { + font-size: 1.25rem; + font-weight: 400; + color: rgba(255, 255, 255, 0.9); + margin: 0 0 0.5rem 0; + letter-spacing: 0.05em; + font-style: italic; +} + +/* ============================================ + Back to Top Link (in sidebar) + ============================================ */ +.back-to-top-sidebar { + display: none; + margin-top: 1.5rem; + padding-top: 1rem; + border-top: 1px solid var(--pst-color-border); +} + +.back-to-top-sidebar.visible { + display: block; +} + +.back-to-top-sidebar a { + display: inline-flex; + align-items: center; + gap: 0.5rem; + color: var(--hyperactive-primary); + text-decoration: none; + font-size: 0.85rem; + font-weight: 500; + transition: color 0.2s ease; +} + +.back-to-top-sidebar a:hover { + color: var(--hyperactive-dark); +} + +html[data-theme="dark"] .back-to-top-sidebar a { + color: var(--hyperactive-secondary); +} + +html[data-theme="dark"] .back-to-top-sidebar a:hover { + color: var(--hyperactive-accent); +} + +.hero-subtitle { + font-size: 1.1rem; + color: rgba(255, 255, 255, 0.9); + margin: 0; +} + +/* ============================================ + Stats Strip (below hero) + ============================================ */ +.stats-strip { + display: flex; + justify-content: center; + align-items: stretch; + gap: 0; + margin: 0 0 2rem 0; + padding: 0; + border-radius: 12px; + overflow: hidden; + box-shadow: var(--card-shadow); + border: 1px solid var(--pst-color-border); + background: var(--pst-color-surface); +} + +.stat-item { + flex: 1; + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + padding: 1.25rem 1rem; + text-align: center; + position: relative; + transition: background 0.2s ease; +} + +.stat-item:not(:last-child)::after { + content: ''; + position: absolute; + right: 0; + top: 20%; + height: 60%; + width: 1px; + background: var(--pst-color-border); +} + +.stat-item:hover { + background: var(--pst-color-background); +} + +.stat-value { + font-size: 1.75rem; + font-weight: 700; + color: var(--hyperactive-primary); + line-height: 1.2; + 
margin-bottom: 0.25rem; +} + +html[data-theme="dark"] .stat-value { + color: var(--hyperactive-secondary); +} + +.stat-label { + font-size: 0.8rem; + font-weight: 500; + color: var(--pst-color-text-muted); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.stat-icon { + font-size: 1.25rem; + margin-bottom: 0.5rem; + opacity: 0.7; +} + +/* Responsive: stack on mobile */ +@media (max-width: 640px) { + .stats-strip { + flex-wrap: wrap; + } + + .stat-item { + flex: 1 1 50%; + padding: 1rem 0.75rem; + } + + .stat-item:not(:last-child)::after { + display: none; + } + + .stat-item:nth-child(1), + .stat-item:nth-child(2) { + border-bottom: 1px solid var(--pst-color-border); + } + + .stat-item:nth-child(odd) { + border-right: 1px solid var(--pst-color-border); + } + + .stat-value { + font-size: 1.5rem; + } +} + +/* ============================================ + Badge Banner (replaces Maturity Banner) + ============================================ */ +.badge-banner { + text-align: center; + padding: 1rem 1rem 1.5rem; + margin: 1rem 0; +} + +.badge-items { + display: flex; + justify-content: center; + align-items: center; + gap: 0.75rem; + flex-wrap: wrap; +} + +.badge-items a { + display: inline-block; + line-height: 0; +} + +.badge-items img { + height: 28px; + vertical-align: middle; + border-radius: 0 !important; + image-rendering: crisp-edges; +} + +/* Legacy Maturity Banner (kept for backwards compatibility) */ +.maturity-banner { + background: linear-gradient(90deg, #f8f9fa 0%, #e9ecef 50%, #f8f9fa 100%); + border-radius: 12px; + padding: 1rem 1.5rem; + margin: 1.5rem 0; + border: 1px solid #dee2e6; +} + +html[data-theme="dark"] .maturity-banner { + background: linear-gradient(90deg, #2d2d3a 0%, #252530 50%, #2d2d3a 100%); + border-color: #3d3d4d; +} + +.maturity-items { + display: flex; + justify-content: center; + gap: 2rem; + flex-wrap: wrap; +} + +.maturity-item { + display: flex; + align-items: center; + gap: 0.5rem; + color: #495057; +} + +html[data-theme="dark"] .maturity-item { + color: #adb5bd; +} + +.maturity-icon { + font-size: 1.25rem; +} + +.maturity-text { + font-size: 0.95rem; +} + +/* ============================================ + Feature Cards + ============================================ */ +.feature-card { + border: 1px solid #e9ecef !important; + border-left: 3px solid var(--hyperactive-primary) !important; + box-shadow: var(--card-shadow); + transition: all 0.2s ease; + border-radius: 8px !important; + overflow: hidden; +} + +html[data-theme="dark"] .feature-card { + border-color: #3d3d4d !important; + border-left-color: var(--hyperactive-secondary) !important; +} + +.feature-card:hover { + box-shadow: var(--card-shadow-hover); + border-left-color: var(--hyperactive-secondary) !important; +} + +html[data-theme="dark"] .feature-card:hover { + border-left-color: var(--hyperactive-accent) !important; +} + +.feature-card .sd-card-body { + padding: 1.25rem; +} + +.feature-card .sd-card-footer { + font-size: 0.85rem; + color: var(--hyperactive-dark); + padding: 0; + background: #f0f1f3; + border-top: 1px solid #e0e2e6; +} + +.feature-card .sd-card-footer p { + margin: 0; +} + +.feature-card .sd-card-footer a { + display: flex; + align-items: center; + padding: 0.75rem 1.25rem; + color: var(--hyperactive-dark); + text-decoration: none; + transition: all 0.2s ease; + position: relative; +} + +.feature-card .sd-card-footer a::after { + content: "→"; + margin-left: auto; + padding-left: 1rem; + opacity: 0; + transform: translateX(-8px); + transition: all 0.2s ease; +} + 
+.feature-card .sd-card-footer a:hover { + background: #e4e6ea; +} + +.feature-card .sd-card-footer a:hover::after { + opacity: 0.6; + transform: translateX(0); +} + +html[data-theme="dark"] .feature-card .sd-card-footer { + background: #2a2a3a; + border-top-color: #3d3d4d; + color: #c8c8d8; +} + +html[data-theme="dark"] .feature-card .sd-card-footer a { + color: #c8c8d8; +} + +html[data-theme="dark"] .feature-card .sd-card-footer a:hover { + background: #3a3a4a; +} + +/* ============================================ + Algorithm Cards + ============================================ */ +.algo-card { + border-left: 4px solid var(--hyperactive-primary) !important; + background: linear-gradient(135deg, #ffffff 0%, #f8f9fa 100%); + transition: all 0.2s ease; +} + +html[data-theme="dark"] .algo-card { + background: linear-gradient(135deg, #2d2d3a 0%, #252530 100%); +} + +.algo-card:hover { + border-left-color: var(--hyperactive-secondary) !important; + transform: translateX(4px); +} + +.algo-card ul { + margin: 0; + padding-left: 1.2rem; +} + +.algo-card li { + margin-bottom: 0.3rem; + font-size: 0.9rem; +} + +.optuna-card { + border-left-color: var(--hyperactive-accent) !important; +} + +.optuna-card:hover { + border-left-color: var(--hyperactive-primary) !important; +} + +/* ============================================ + Backend Cards (GFO, Optuna, sklearn) + ============================================ */ +.backend-card { + border-radius: 8px !important; + overflow: hidden; + box-shadow: var(--card-shadow); + border: none !important; + position: relative; + background: var(--pst-color-surface); +} + +/* Side accent bar using pseudo-element for multi-color support */ +.backend-card::before { + content: ''; + position: absolute; + left: 0; + top: 0; + bottom: 0; + width: 6px; + border-radius: 8px 0 0 8px; +} + +.backend-card .sd-card-header { + padding: 1.25rem 1.5rem 0.75rem; + border-bottom: none; + background: transparent; +} + +.backend-card .sd-card-header .sd-card-text { + font-size: 1.25rem; + font-weight: 600; + margin: 0; +} + +.backend-card .sd-card-header a { + text-decoration: none; + transition: opacity 0.2s ease; +} + +.backend-card .sd-card-header a:hover { + text-decoration: underline; +} + +.backend-card .sd-card-body { + padding: 0.5rem 1.5rem 1rem; + font-size: 0.95rem; + line-height: 1.6; +} + +.backend-card .sd-card-body ul { + margin: 0.75rem 0 0 0; + padding-left: 1.25rem; +} + +.backend-card .sd-card-body li { + margin-bottom: 0.4rem; +} + +.backend-card .sd-card-footer { + padding: 0; + border-top: 1px solid var(--pst-color-border); + background: transparent; + transition: background 0.2s ease; +} + +.backend-card .sd-card-footer a { + display: flex; + align-items: center; + padding: 0.875rem 1.5rem; + text-decoration: none; + font-weight: 500; + transition: all 0.2s ease; + position: relative; +} + +.backend-card .sd-card-footer a::after { + content: "→"; + margin-left: auto; + padding-left: 1rem; + opacity: 0; + transform: translateX(-8px); + transition: all 0.2s ease; +} + +.backend-card .sd-card-footer:hover { + background: var(--pst-color-background); +} + +.backend-card .sd-card-footer a:hover::after { + opacity: 0.6; + transform: translateX(0); +} + +/* GFO: Tri-color (dark navy | light blue | grey) */ +.backend-card-gfo::before { + background: linear-gradient(to bottom, + var(--gfo-dark) 0%, var(--gfo-dark) 33%, + var(--gfo-light) 33%, var(--gfo-light) 66%, + var(--gfo-grey) 66%, var(--gfo-grey) 100%); +} + +/* Optuna: Vertical gradient (dark blue → light blue) */ 
+.backend-card-optuna::before { + background: linear-gradient(to bottom, var(--optuna-dark) 0%, var(--optuna-light) 100%); +} + +/* sklearn: 50/50 split (orange | cyan) */ +.backend-card-sklearn::before { + background: linear-gradient(to bottom, var(--sklearn-orange) 50%, var(--sklearn-blue) 50%); +} + +/* ============================================ + Integration Cards (horizontal, two-part) + ============================================ */ +.integration-grid { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 1.25rem; + margin: 1.5rem 0; +} + +@media (max-width: 768px) { + .integration-grid { + grid-template-columns: 1fr; + } +} + +.integration-card { + background: var(--pst-color-surface); + border-radius: 8px; + display: flex; + box-shadow: var(--card-shadow); + position: relative; + overflow: hidden; +} + +.integration-card::before { + content: ''; + position: absolute; + left: 0; + top: 0; + bottom: 0; + width: 5px; +} + +.integration-left { + padding: 1.25rem 1.5rem; + display: flex; + align-items: center; + min-width: 140px; +} + +.integration-left a { + font-size: 1.1rem; + font-weight: 600; + text-decoration: none; + transition: opacity 0.2s ease; +} + +.integration-left a:hover { + text-decoration: underline; +} + +.integration-right { + flex: 1; + display: flex; + align-items: center; + border-left: 1px solid var(--pst-color-border); + position: relative; + transition: background 0.2s ease; +} + +.integration-right a { + display: flex; + align-items: center; + padding: 1.25rem 1.5rem; + width: 100%; + height: 100%; + text-decoration: none; + color: inherit; + transition: all 0.2s ease; +} + +.integration-right a::after { + content: "→"; + margin-left: auto; + padding-left: 1rem; + opacity: 0; + transform: translateX(-8px); + transition: all 0.2s ease; +} + +.integration-right:hover { + background: var(--pst-color-background); +} + +.integration-right:hover a::after { + opacity: 0.6; + transform: translateX(0); +} + +.integration-desc { + font-size: 0.95rem; + line-height: 1.4; +} + +/* Integration card colors */ +.integration-card.sklearn::before { + background: linear-gradient(to bottom, var(--sklearn-orange) 50%, var(--sklearn-blue) 50%); +} + +.integration-card.sktime::before { + background: linear-gradient(to bottom, var(--sktime-blue) 0%, var(--sktime-teal) 100%); +} + +.integration-card.skpro::before { + background: linear-gradient(to bottom, var(--skpro-blue) 0%, var(--skpro-teal) 100%); +} + +.integration-card.pytorch::before { + background: linear-gradient(to bottom, var(--pytorch-orange) 50%, var(--pytorch-yellow) 50%); +} + +/* ============================================ + Navigation Cards + ============================================ */ +/* ============================================ + Contents Navigation Cards (horizontal style) + ============================================ */ +.contents-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 1rem; + margin: 1.5rem 0; +} + +@media (max-width: 992px) { + .contents-grid { + grid-template-columns: repeat(2, 1fr); + } +} + +@media (max-width: 576px) { + .contents-grid { + grid-template-columns: 1fr; + } +} + +.contents-card { + background: var(--pst-color-surface); + border-radius: 8px; + display: flex; + align-items: stretch; + box-shadow: var(--card-shadow); + position: relative; + overflow: hidden; + text-decoration: none; + color: inherit; + transition: box-shadow 0.2s ease; +} + +.contents-card::before { + content: ''; + position: absolute; + left: 0; + top: 0; + bottom: 0; + 
width: 4px; + background: linear-gradient(to bottom, var(--hyperactive-primary) 0%, var(--hyperactive-secondary) 100%); +} + +.contents-card:hover { + box-shadow: var(--card-shadow-hover); +} + +.contents-card-inner { + display: flex; + align-items: center; + width: 100%; + padding: 1rem 1.25rem 1rem 1.5rem; + transition: all 0.2s ease; +} + +.contents-card:hover .contents-card-inner { + background: var(--pst-color-background); +} + +.contents-card-text { + flex: 1; +} + +.contents-card-title { + font-size: 1rem; + font-weight: 600; + margin: 0 0 0.25rem 0; +} + +.contents-card-desc { + font-size: 0.85rem; + opacity: 0.7; + margin: 0; + line-height: 1.3; +} + +.contents-card-arrow { + opacity: 0; + transform: translateX(-8px); + transition: all 0.2s ease; + margin-left: 1rem; + font-size: 1rem; +} + +.contents-card:hover .contents-card-arrow { + opacity: 0.6; + transform: translateX(0); +} + +/* ============================================ + Visualization Section + ============================================ */ +.visualization-section { + text-align: center; + padding: 2rem 0; +} + +.visualization-section h2 { + color: var(--hyperactive-primary); + margin-bottom: 0.5rem; +} + +.visualization-section p { + color: #6c757d; + margin-bottom: 1.5rem; +} + +.optimization-gif { + max-width: 100%; + max-height: 400px; + border-radius: 12px; + box-shadow: var(--card-shadow-hover); + border: 3px solid var(--hyperactive-primary); +} + +/* ============================================ + Sponsor Section + ============================================ */ +.sponsor-section { + text-align: center; + padding: 1.5rem 0; +} + +.sponsor-section img { + transition: transform 0.2s ease; +} + +.sponsor-section img:hover { + transform: scale(1.05); +} + +/* ============================================ + Code Blocks Enhancement + ============================================ */ +.highlight { + border-radius: 8px; + overflow: hidden; +} + +.highlight pre { + border-radius: 8px; + padding: 1rem; +} + +/* ============================================ + Tab Set Styling + ============================================ */ +.sd-tab-set { + border-radius: 12px; + overflow: hidden; + box-shadow: var(--card-shadow); +} + +.sd-tab-label { + font-weight: 500; + transition: all 0.2s ease; +} + +.sd-tab-label:hover { + background-color: rgba(93, 93, 122, 0.1); +} + +input[name^="sd-tab-set"]:checked + label { + border-bottom-color: var(--hyperactive-primary) !important; + color: var(--hyperactive-primary); +} + +/* ============================================ + Section Separators - Full Width Horizontal Lines + ============================================ */ +hr { + border: none; + height: 1px; + background-color: var(--separator-color); + margin: 2.5rem 0; + width: 100%; +} + +/* ============================================ + Section Headers - Clean without decorative underlines + ============================================ */ +h2 { + position: relative; + padding-bottom: 0; +} + +/* Remove the short underline pseudo-element */ +h2::after { + display: none; +} + +/* ============================================ + Button Enhancements + ============================================ */ +.sd-btn-primary { + background: var(--hyperactive-gradient) !important; + border: none !important; + transition: all 0.3s ease !important; +} + +.sd-btn-primary:hover { + background: var(--hyperactive-gradient-dark) !important; + transform: translateY(-1px); + box-shadow: 0 4px 12px rgba(93, 93, 122, 0.4); +} + +/* 
============================================ + Responsive Adjustments + ============================================ */ +@media (max-width: 768px) { + .hero-section { + padding: 2rem 1rem; + margin: -1rem -0.5rem 1.5rem -0.5rem; + } + + .hero-title { + font-size: 2.5rem; + } + + .hero-tagline { + font-size: 1.4rem; + } + + .hero-subtitle { + font-size: 1rem; + } + + .maturity-items { + gap: 1rem; + } + + .maturity-item { + font-size: 0.85rem; + } + + .feature-icon { + font-size: 2rem; + } +} + +@media (max-width: 480px) { + .hero-title { + font-size: 2rem; + } + + .maturity-items { + flex-direction: column; + gap: 0.75rem; + } +} + +/* ============================================ + Print Styles + ============================================ */ +@media print { + .hero-section { + background: #f8f9fa !important; + color: #000 !important; + } + + .hero-title, + .hero-tagline, + .hero-subtitle { + color: #000 !important; + } +} + +/* ============================================ + Vertical Tabs Component + ============================================ */ +.vertical-tabs { + display: flex; + gap: 1.5rem; + margin: 1.5rem 0; +} + +.vertical-tabs-nav { + display: flex; + flex-direction: column; + width: 190px; + flex-shrink: 0; + background: var(--pst-color-background); + border-radius: 10px; + padding: 0.5rem; + box-shadow: var(--card-shadow); + border: 1px solid var(--pst-color-border); + align-self: flex-start; +} + +.vertical-tab-btn { + display: flex; + align-items: center; + padding: 0.75rem 1rem; + background: transparent; + border: none; + border-left: 3px solid transparent; + border-radius: 6px; + text-align: left; + font-size: 0.875rem; + font-weight: 500; + color: var(--pst-color-text-muted); + cursor: pointer; + transition: all 0.2s ease; + position: relative; + margin: 2px 0; +} + +.vertical-tab-btn:hover { + background: var(--pst-color-surface); + color: var(--pst-color-text-base); +} + +.vertical-tab-btn.active { + border-left-color: var(--hyperactive-primary); + background: var(--pst-color-surface); + color: var(--hyperactive-primary); +} + +html[data-theme="dark"] .vertical-tab-btn.active { + border-left-color: var(--hyperactive-secondary); + color: var(--hyperactive-secondary); +} + +.vertical-tab-btn .tab-indicator { + width: 6px; + height: 6px; + border-radius: 50%; + background: var(--hyperactive-primary); + margin-right: 0.75rem; + opacity: 0; + transform: scale(0); + transition: all 0.2s ease; +} + +.vertical-tab-btn.active .tab-indicator { + opacity: 1; + transform: scale(1); +} + +html[data-theme="dark"] .vertical-tab-btn .tab-indicator { + background: var(--hyperactive-secondary); +} + +.vertical-tabs-content { + flex: 1; + min-height: 200px; + overflow: hidden; +} + +.vertical-tab-panel { + display: none; +} + +.vertical-tab-panel.active { + display: block; + animation: revealVertical 0.35s ease-out; +} + +@keyframes revealVertical { + from { + opacity: 0; + clip-path: inset(0 0 100% 0); + } + to { + opacity: 1; + clip-path: inset(0 0 0 0); + } +} + +/* Code block styling within vertical tabs */ +.vertical-tab-panel .highlight { + margin: 0; + border-radius: 4px; + box-shadow: var(--card-shadow); + height: 380px; + overflow: hidden; +} + +.vertical-tab-panel .highlight pre { + margin: 0; + padding: 1rem 1.25rem; + font-size: 0.85rem; + line-height: 1.5; + height: 100%; + overflow-y: auto; + border-radius: 4px; +} + +/* Responsive: Convert to horizontal on smaller screens */ +@media (max-width: 768px) { + .vertical-tabs { + flex-direction: column; + gap: 1rem; + } + + 
.vertical-tabs-nav { + flex-direction: row; + min-width: 100%; + overflow-x: auto; + padding: 0.25rem; + -webkit-overflow-scrolling: touch; + scrollbar-width: none; + } + + .vertical-tabs-nav::-webkit-scrollbar { + display: none; + } + + .vertical-tab-btn { + flex-shrink: 0; + border-left: none; + border-bottom: 3px solid transparent; + padding: 0.625rem 1rem; + white-space: nowrap; + margin: 0 2px; + } + + .vertical-tab-btn.active { + border-left-color: transparent; + border-bottom-color: var(--hyperactive-primary); + } + + html[data-theme="dark"] .vertical-tab-btn.active { + border-bottom-color: var(--hyperactive-secondary); + } + + .vertical-tab-btn .tab-indicator { + display: none; + } +} + +/* Section title styling for Quick Install/Example */ +.vertical-tabs-section-title { + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.05em; + color: var(--pst-color-text-muted); + padding: 0.75rem 1.25rem 0.5rem; + margin: 0; +} + +/* Tab description subtitle */ +.vertical-tab-btn .tab-subtitle { + display: block; + font-size: 0.75rem; + font-weight: 400; + color: var(--pst-color-text-muted); + margin-top: 0.125rem; +} + +.vertical-tab-btn.active .tab-subtitle { + color: var(--hyperactive-secondary); +} + +html[data-theme="dark"] .vertical-tab-btn.active .tab-subtitle { + color: var(--hyperactive-accent); +} + +/* ============================================ + Segmented Control Tabs (for Quick Install) + ============================================ */ +.segmented-tabs { + margin: 1.5rem 0; +} + +.segmented-tabs-nav { + display: inline-flex; + background: var(--pst-color-background); + border-radius: 10px; + padding: 4px; + gap: 2px; + box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.08); + border: 1px solid var(--pst-color-border); +} + +.segmented-tab-btn { + padding: 0.5rem 1.25rem; + border: none; + background: transparent; + border-radius: 8px; + font-size: 0.875rem; + font-weight: 500; + color: var(--pst-color-text-muted); + cursor: pointer; + transition: all 0.2s ease; + white-space: nowrap; +} + +.segmented-tab-btn:hover { + color: var(--pst-color-text-base); +} + +.segmented-tab-btn.active { + background: var(--pst-color-surface); + color: var(--hyperactive-primary); + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); +} + +html[data-theme="dark"] .segmented-tab-btn.active { + color: var(--hyperactive-secondary); + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.3); +} + +.segmented-tabs-content { + margin-top: 1rem; +} + +.segmented-tab-panel { + display: none; + animation: segmentedFadeIn 0.2s ease; +} + +.segmented-tab-panel.active { + display: block; +} + +@keyframes segmentedFadeIn { + from { + opacity: 0; + transform: translateY(-4px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.segmented-tab-panel .highlight { + border-radius: 8px; + margin: 0; +} + +.segmented-tab-panel .highlight pre { + margin: 0; + padding: 0.875rem 1.25rem; + font-size: 0.9rem; +} + +/* Responsive: stack on very small screens */ +@media (max-width: 540px) { + .segmented-tabs-nav { + flex-wrap: wrap; + width: 100%; + } + + .segmented-tab-btn { + flex: 1 1 auto; + text-align: center; + padding: 0.5rem 0.75rem; + } +} diff --git a/docs/source/_static/fields.css b/docs/source/_static/fields.css new file mode 100644 index 00000000..48ec4964 --- /dev/null +++ b/docs/source/_static/fields.css @@ -0,0 +1,21 @@ +/* CSS for parameter fields and other structured documentation elements */ + +/* Styling for parameter lists */ +dl.field-list { + display: grid; + grid-template-columns: 
fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 0.5em; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-bottom: 0.5em; + margin-left: 0em; +} diff --git a/docs/source/_static/images/badges/generate_badges.py b/docs/source/_static/images/badges/generate_badges.py new file mode 100644 index 00000000..96fb09c3 --- /dev/null +++ b/docs/source/_static/images/badges/generate_badges.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 +""" +Badge Generator for Hyperactive Documentation + +Generates local SVG badges based on project information from pyproject.toml. +This eliminates the dependency on external badge services like shields.io. + +Usage: + python generate_badges.py + +Output: + - version.svg + - python.svg + - license.svg + - sponsor.svg +""" + +import re +from pathlib import Path + +try: + import tomllib +except ImportError: + import tomli as tomllib + + +# Hyperactive color palette +COLORS = { + "primary": "#5D5D7A", + "secondary": "#7070A0", + "dark": "#4A4A65", + "light": "#f8f9fa", + "sponsor": "#0eac92", + "label_bg": "#555", +} + + +def create_badge_svg( + label: str, value: str, color: str, label_width: int = None, value_width: int = None +) -> str: + """ + Create an SVG badge in shields.io flat-square style. + + Parameters + ---------- + label : str + Left side text (e.g., "version") + value : str + Right side text (e.g., "5.0.2") + color : str + Hex color for the value background + label_width : int, optional + Override calculated label width + value_width : int, optional + Override calculated value width + + Returns + ------- + str + SVG string + """ + # Approximate width calculation (~6.5px per character plus padding) + char_width = 6.5 + padding = 10 + + lw = label_width or int(len(label) * char_width + padding * 2) + vw = value_width or int(len(value) * char_width + padding * 2) + total_width = lw + vw + height = 20 + + svg = f"""<svg xmlns="http://www.w3.org/2000/svg" width="{total_width}" height="{height}" role="img" aria-label="{label}: {value}"> + <title>{label}: {value}</title> + <g shape-rendering="crispEdges"> + <rect width="{lw}" height="{height}" fill="{COLORS['label_bg']}"/> + <rect x="{lw}" width="{vw}" height="{height}" fill="{color}"/> + </g> + <g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" font-size="11"> + <text x="{lw / 2}" y="14">{label}</text> + <text x="{lw + vw / 2}" y="14">{value}</text> + </g> +</svg>""" + + return svg + + +def create_simple_badge_svg( + text: str, color: str, width: int = None, font_size: int = 11 +) -> str: + """ + Create a simple single-section SVG badge. + + Parameters + ---------- + text : str + Badge text + color : str + Background color + width : int, optional + Override calculated width + font_size : int + Font size in pixels + + Returns + ------- + str + SVG string + """ + char_width = 7 + padding = 16 + w = width or int(len(text) * char_width + padding * 2) + height = 20 + text_y = 14 if font_size >= 11 else 13.5 + + svg = f"""<svg xmlns="http://www.w3.org/2000/svg" width="{w}" height="{height}" role="img" aria-label="{text}"> + <title>{text}</title> + <rect width="{w}" height="{height}" fill="{color}" shape-rendering="crispEdges"/> + <text x="{w / 2}" y="{text_y}" fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" font-size="{font_size}">{text}</text> +</svg>""" + + return svg + + +def extract_python_versions(classifiers: list) -> str: + """Extract the Python version range from trove classifiers.""" + versions = [] + pattern = r"Programming Language :: Python :: (\d+\.\d+)" + + for classifier in classifiers: + match = re.match(pattern, classifier) + if match: + versions.append(match.group(1)) + + if not versions: + return "3.10+" + + versions.sort(key=lambda v: tuple(map(int, v.split(".")))) + + if len(versions) == 1: + return versions[0] + + return f"{versions[0]} - {versions[-1]}"
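 + +# Example of the version-range extraction (illustrative classifier list, not +# read from pyproject.toml): +# +# >>> extract_python_versions([ +# ... "Programming Language :: Python :: 3.10", +# ... "Programming Language :: Python :: 3.14", +# ... ]) +# '3.10 - 3.14'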
Badges generated successfully.") + + +if __name__ == "__main__": + main() diff --git a/docs/source/_static/images/badges/license.svg b/docs/source/_static/images/badges/license.svg new file mode 100644 index 00000000..ed760277 --- /dev/null +++ b/docs/source/_static/images/badges/license.svg @@ -0,0 +1,11 @@ + + license: MIT + + + + + + license + + MIT + diff --git a/docs/source/_static/images/badges/python.svg b/docs/source/_static/images/badges/python.svg new file mode 100644 index 00000000..855acfcf --- /dev/null +++ b/docs/source/_static/images/badges/python.svg @@ -0,0 +1,11 @@ + + python: 3.10 - 3.14 + + + + + + python + + 3.10 - 3.14 + diff --git a/docs/source/_static/images/badges/sponsor.svg b/docs/source/_static/images/badges/sponsor.svg new file mode 100644 index 00000000..b140d8ec --- /dev/null +++ b/docs/source/_static/images/badges/sponsor.svg @@ -0,0 +1,11 @@ + + GC.OS: Sponsored + + + + + + GC.OS + + Sponsored + diff --git a/docs/source/_static/images/badges/version.svg b/docs/source/_static/images/badges/version.svg new file mode 100644 index 00000000..8ed5094b --- /dev/null +++ b/docs/source/_static/images/badges/version.svg @@ -0,0 +1,11 @@ + + version: v5.0.2 + + + + + + version + + v5.0.2 + diff --git a/docs/source/_static/images/bayes_convex.gif b/docs/source/_static/images/bayes_convex.gif new file mode 100644 index 00000000..3133e2c4 Binary files /dev/null and b/docs/source/_static/images/bayes_convex.gif differ diff --git a/docs/source/_static/images/logo.png b/docs/source/_static/images/logo.png new file mode 100644 index 00000000..03e2f9d5 Binary files /dev/null and b/docs/source/_static/images/logo.png differ diff --git a/docs/source/_static/images/navbar_logo.png b/docs/source/_static/images/navbar_logo.png new file mode 100644 index 00000000..8aee0851 Binary files /dev/null and b/docs/source/_static/images/navbar_logo.png differ diff --git a/docs/source/_static/images/navbar_logo.svg b/docs/source/_static/images/navbar_logo.svg new file mode 100644 index 00000000..14ca60cf --- /dev/null +++ b/docs/source/_static/images/navbar_logo.svg @@ -0,0 +1,326 @@ + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/source/_static/images/navbar_logo_dark.svg b/docs/source/_static/images/navbar_logo_dark.svg new file mode 100644 index 00000000..be74742d --- /dev/null +++ b/docs/source/_static/images/navbar_logo_dark.svg @@ -0,0 +1,326 @@ + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/source/_templates/class.rst b/docs/source/_templates/class.rst new file mode 100644 index 00000000..17d61dc4 --- /dev/null +++ b/docs/source/_templates/class.rst @@ -0,0 +1,10 @@ +{{objname}} +{{ underline }}============== + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + +.. raw:: html + +
diff --git a/docs/source/_templates/function.rst b/docs/source/_templates/function.rst new file mode 100644 index 00000000..54226b73 --- /dev/null +++ b/docs/source/_templates/function.rst @@ -0,0 +1,10 @@ +{{objname}} +{{ underline }}==================== + +.. currentmodule:: {{ module }} + +.. autofunction:: {{ objname }} + +.. raw:: html + + <div class="clearer"></div>
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html new file mode 100644 index 00000000..b43b96a9 --- /dev/null +++ b/docs/source/_templates/layout.html @@ -0,0 +1,6 @@ +{% extends "pydata_sphinx_theme/layout.html" %} + +{% block extrahead %} +{{ super() }} + +{% endblock %} diff --git a/docs/source/about.rst b/docs/source/about.rst new file mode 100644 index 00000000..7b342c8b --- /dev/null +++ b/docs/source/about.rst @@ -0,0 +1,107 @@ +.. _about: + +===== +About +===== + +Hyperactive is an optimization and data collection toolbox for convenient and fast +prototyping of computationally expensive models. + +.. toctree:: + :maxdepth: 1 + + about/team + about/history + about/license + + +About Hyperactive +----------------- + +Hyperactive provides a unified interface for hyperparameter optimization using +various gradient-free optimization algorithms. It supports optimization for +scikit-learn, sktime, skpro, and PyTorch Lightning models, as well as custom +objective functions. + + +Mission +^^^^^^^ + +Hyperactive aims to make hyperparameter optimization accessible and practical for +machine learning practitioners. By providing a unified API across many optimization +algorithms and ML frameworks, it reduces the barrier to finding optimal model +configurations. + + +Key Features +^^^^^^^^^^^^ + +- **20+ Optimization Algorithms**: From simple hill climbing to advanced Bayesian + optimization, population methods, and Optuna integration. + +- **Experiment-Based Architecture**: Clean separation between what to optimize + (experiments) and how to optimize (algorithms). + +- **Framework Integrations**: First-class support for scikit-learn, sktime, skpro, + and PyTorch Lightning. + +- **Flexible Search Spaces**: Discrete, continuous, and mixed parameter spaces + using familiar NumPy/list syntax. + +- **Production Ready**: Battle-tested since 2019 with comprehensive testing and + active maintenance. + + +Related Projects +^^^^^^^^^^^^^^^^ + +Hyperactive is part of a larger ecosystem: + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Project + - Description + * - `Gradient-Free-Optimizers <https://github.com/SimonBlanke/Gradient-Free-Optimizers>`_ + - Core optimization algorithms used by Hyperactive + * - `Search-Data-Collector <https://github.com/SimonBlanke/Search-Data-Collector>`_ + - Save search data during optimization to CSV files + * - `Search-Data-Explorer <https://github.com/SimonBlanke/Search-Data-Explorer>`_ + - Visualize search data with Plotly in a Streamlit dashboard + + +Sponsorship +^^^^^^^^^^^ + +Hyperactive is sponsored by the +`German Center for Open Source AI (GC.OS) <https://gc-os-ai.github.io/>`_. + +.. image:: https://img.shields.io/badge/GC.OS-Sponsored%20Project-orange.svg?style=for-the-badge&colorA=0eac92&colorB=2077b4 + :target: https://gc-os-ai.github.io/ + :alt: GC.OS Sponsored + + +Citing Hyperactive +^^^^^^^^^^^^^^^^^^ + +If you use Hyperactive in your research, please cite it: + +.. code-block:: bibtex + + @Misc{hyperactive2021, + author = {{Simon Blanke}}, + title = {{Hyperactive}: An optimization and data collection toolbox + for convenient and fast prototyping of computationally + expensive models.}, + howpublished = {\url{https://github.com/SimonBlanke}}, + year = {since 2019} + } + + +Community +^^^^^^^^^ + +- **GitHub**: `SimonBlanke/Hyperactive <https://github.com/SimonBlanke/Hyperactive>`_ +- **Discord**: `Join the community `_ +- **LinkedIn**: `German Center for Open Source AI `_
diff --git a/docs/source/about/history.rst b/docs/source/about/history.rst new file mode 100644 index 00000000..ce3033ff --- /dev/null +++ b/docs/source/about/history.rst @@ -0,0 +1,126 @@ +.. _history: + +======= +History +======= + +This page documents the history and evolution of Hyperactive. + + +Project History +--------------- + +Hyperactive was created in 2018 by Simon Blanke to address the need for a flexible, +unified interface for hyperparameter optimization in machine learning workflows. + + +Timeline +^^^^^^^^ + +**2018 - Project Creation** + Hyperactive was first released as an open-source project, providing a collection + of gradient-free optimization algorithms accessible through a simple Python API. + +**2019 - Growing Adoption** + The project gained traction in the machine learning community, with users + appreciating its straightforward interface and variety of optimization algorithms. + +**2020-2021 - Ecosystem Expansion** + Related projects were developed to complement Hyperactive: + + - **Gradient-Free-Optimizers**: The optimization backend was extracted into its + own package, allowing for more modular development. + - **Search-Data-Collector**: Tools for saving optimization results. + - **Search-Data-Explorer**: Visualization dashboard for exploring search data. + +**2022-2023 - Continued Development** + Active maintenance continued with bug fixes, new algorithms, and improved + documentation. The user base continued to grow. + +**2024 - Version 5.0 Redesign** + Major architecture redesign introducing: + + - **Experiment-based architecture**: Clean separation between optimization + problems (experiments) and optimization algorithms (optimizers). + - **Enhanced integrations**: Improved support for scikit-learn, sktime, skpro, + and PyTorch Lightning. + - **Optuna backend**: Integration with Optuna's optimization algorithms. + - **Modern Python support**: Support for Python 3.10 through 3.14. + +**2024 - GC.OS Sponsorship** + Hyperactive became a sponsored project of the German Center for Open Source AI + (GC.OS), ensuring continued development and maintenance. + + +Version History +--------------- + +Major Versions +^^^^^^^^^^^^^^ + +.. list-table:: + :header-rows: 1 + :widths: 15 85 + + * - Version + - Highlights + * - v5.0 + - Experiment-based architecture, Optuna integration, modern Python support + * - v4.x + - Improved API stability, additional optimizers + * - v3.x + - Search data collection features, expanded algorithm library + * - v2.x + - Multi-processing support, warm starting + * - v1.x + - Initial public release with core optimization algorithms + + +Breaking Changes +^^^^^^^^^^^^^^^^ + +Major version updates (e.g., v4 → v5) may include breaking API changes. +If you're upgrading from an older version: + +1. Check the `GitHub releases <https://github.com/SimonBlanke/Hyperactive/releases>`_ + for migration guides. +2. Update your code to use the new API patterns. +3. Alternatively, pin your version to continue using the old API. + +.. code-block:: bash + + # Upgrade to latest + pip install hyperactive --upgrade + + # Or pin to specific version + pip install hyperactive==4.x.x + + +Legacy Documentation +^^^^^^^^^^^^^^^^^^^^ + +Documentation for Hyperactive v4 is still available at the legacy documentation site: + +`Legacy Documentation (v4) `_ + +This may be useful if you: + +- Are maintaining projects that use Hyperactive v4 +- Need to reference the previous API design +- Want to compare the old and new approaches + + +Future Roadmap +-------------- + +Hyperactive continues to evolve.
Planned improvements include: + +- Additional optimization algorithms +- Enhanced visualization tools +- Improved distributed computing support +- More framework integrations +- Performance optimizations + +For the latest roadmap, see the +`GitHub Issues <https://github.com/SimonBlanke/Hyperactive/issues>`_ and +`Discussions <https://github.com/SimonBlanke/Hyperactive/discussions>`_. diff --git a/docs/source/about/license.rst b/docs/source/about/license.rst new file mode 100644 index 00000000..498ed9ad --- /dev/null +++ b/docs/source/about/license.rst @@ -0,0 +1,99 @@ +.. _license: + +======= +License +======= + +Hyperactive is open-source software released under the MIT License. + + +MIT License +----------- + +.. code-block:: text + + MIT License + + Copyright (c) 2018 Simon Blanke + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + +What This Means +--------------- + +The MIT License is a permissive open-source license. + +**You Can:** + +- Use Hyperactive for commercial projects +- Modify the source code +- Distribute copies +- Use it in private projects +- Sublicense the code + +**Requirements:** + +- Include the copyright notice and license text in any copies or substantial + portions of the software + +**Limitations:** + +- The software is provided "as is" without warranty +- The authors are not liable for any damages + + +Third-Party Licenses +-------------------- + +Hyperactive depends on several third-party packages, each with their own licenses: + +.. list-table:: + :header-rows: 1 + :widths: 30 30 40 + + * - Package + - License + - Purpose + * - NumPy + - BSD-3-Clause + - Numerical operations + * - pandas + - BSD-3-Clause + - Data manipulation + * - scikit-learn + - BSD-3-Clause + - Machine learning integration + * - Gradient-Free-Optimizers + - MIT + - Core optimization algorithms + * - tqdm + - MIT/MPL-2.0 + - Progress bars + +All dependencies use permissive open-source licenses compatible with commercial use. + + +Questions +--------- + +For questions about licensing or usage rights, please contact: + +- **Email**: simon.blanke@yahoo.com +- **GitHub Issues**: `Report an issue <https://github.com/SimonBlanke/Hyperactive/issues>`_ diff --git a/docs/source/about/team.rst b/docs/source/about/team.rst new file mode 100644 index 00000000..352c4006 --- /dev/null +++ b/docs/source/about/team.rst @@ -0,0 +1,102 @@ +.. _team: + +==== +Team +==== + +Hyperactive is developed and maintained by a dedicated team of open-source contributors. + + +Core Team +--------- + +Simon Blanke +^^^^^^^^^^^^ + +**Creator and Lead Maintainer** + +Simon Blanke is the creator and primary maintainer of Hyperactive.
He started the +project in 2018 to make hyperparameter optimization more accessible to machine +learning practitioners. + +- **GitHub**: `@SimonBlanke `_ +- **Email**: simon.blanke@yahoo.com + +Simon also maintains the related projects in the Hyperactive ecosystem: + +- `Gradient-Free-Optimizers `_ +- `Search-Data-Collector `_ +- `Search-Data-Explorer `_ + + +Contributors +------------ + +Hyperactive benefits from contributions by many developers. We appreciate everyone +who has helped improve the project through code, documentation, bug reports, and +feature suggestions. + +To see a full list of contributors, visit the +`GitHub contributors page `_. + + +Become a Contributor +^^^^^^^^^^^^^^^^^^^^ + +We welcome contributions from the community! There are many ways to get involved: + +- **Report bugs**: Open an issue on GitHub +- **Suggest features**: Share your ideas in GitHub Discussions +- **Contribute code**: Submit a pull request +- **Improve documentation**: Help make the docs better +- **Share examples**: Contribute use cases and tutorials + +See the :ref:`contributing` guide for more information on how to contribute. + + +Sponsorship +----------- + +German Center for Open Source AI (GC.OS) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Hyperactive is proudly sponsored by the +`German Center for Open Source AI (GC.OS) `_. + +.. image:: https://img.shields.io/badge/GC.OS-Sponsored%20Project-orange.svg?style=for-the-badge&colorA=0eac92&colorB=2077b4 + :target: https://gc-os-ai.github.io/ + :alt: GC.OS Sponsored + +This sponsorship helps ensure the continued development and maintenance of +Hyperactive as a high-quality open-source project. + + +Community +--------- + +Join the Hyperactive community: + +- **Discord**: `Join our Discord server `_ + for discussions, questions, and announcements. + +- **LinkedIn**: Follow the + `German Center for Open Source AI `_ + for updates. + +- **GitHub Discussions**: Participate in + `discussions `_ + about features and best practices. + + +Acknowledgments +--------------- + +We thank all users who have: + +- Reported bugs and issues +- Suggested new features +- Contributed to the codebase +- Helped improve the documentation +- Shared Hyperactive with others + +Your support makes this project possible! diff --git a/docs/source/api_reference.rst b/docs/source/api_reference.rst new file mode 100644 index 00000000..7f5996f7 --- /dev/null +++ b/docs/source/api_reference.rst @@ -0,0 +1,21 @@ +.. _api_reference: + +============= +API Reference +============= + +Welcome to the API reference for ``hyperactive``. + +The API reference provides a technical manual. +It describes the classes and functions included in Hyperactive. + +.. toctree:: + :maxdepth: 1 + + api_reference/base + api_reference/optimizers + api_reference/experiments_function + api_reference/experiments_integrations + api_reference/experiments_benchmarks + api_reference/sklearn_integration + api_reference/utilities diff --git a/docs/source/api_reference/base.rst b/docs/source/api_reference/base.rst new file mode 100644 index 00000000..a7dca433 --- /dev/null +++ b/docs/source/api_reference/base.rst @@ -0,0 +1,17 @@ +.. _base_ref: + +Base Classes +============ + +The :mod:`hyperactive.base` module contains the base classes for optimizers and experiments. + +All optimizers inherit from ``BaseOptimizer`` and all experiments inherit from ``BaseExperiment``. + +.. currentmodule:: hyperactive.base + +.. 
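note::
+
+    As a quick illustration of this inheritance (a minimal sketch; ``RandomSearch``
+    and ``SklearnCvExperiment`` stand in for any concrete optimizer and experiment
+    from the API pages that follow):
+
+    .. code-block:: python
+
+        from hyperactive.base import BaseExperiment, BaseOptimizer
+        from hyperactive.experiment.integrations import SklearnCvExperiment
+        from hyperactive.opt import RandomSearch
+
+        # concrete classes derive from the two base classes
+        assert issubclass(RandomSearch, BaseOptimizer)
+        assert issubclass(SklearnCvExperiment, BaseExperiment)
+
+.. 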
autosummary:: + :toctree: auto_generated/ + :template: class.rst + + BaseOptimizer + BaseExperiment diff --git a/docs/source/api_reference/experiments_benchmarks.rst b/docs/source/api_reference/experiments_benchmarks.rst new file mode 100644 index 00000000..0c46dba5 --- /dev/null +++ b/docs/source/api_reference/experiments_benchmarks.rst @@ -0,0 +1,19 @@ +.. _experiments_benchmarks_ref: + +Benchmark Function Experiments +=============================== + +The :mod:`hyperactive.experiment.bench` module contains standard benchmark functions +for testing and comparing optimization algorithms. + +These mathematical functions have known properties and are commonly used in optimization research. + +.. currentmodule:: hyperactive.experiment.bench + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + Ackley + Parabola + Sphere diff --git a/docs/source/api_reference/experiments_function.rst b/docs/source/api_reference/experiments_function.rst new file mode 100644 index 00000000..bef832ae --- /dev/null +++ b/docs/source/api_reference/experiments_function.rst @@ -0,0 +1,20 @@ +.. _experiments_function_ref: + +Function Experiments +==================== + +The :mod:`hyperactive.experiment` module contains experiment classes for defining +optimization objectives. + +Generic Function Experiment +---------------------------- + +The ``FunctionExperiment`` class allows you to wrap any callable as an optimization target. + +.. currentmodule:: hyperactive.experiment.func + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + FunctionExperiment diff --git a/docs/source/api_reference/experiments_integrations.rst b/docs/source/api_reference/experiments_integrations.rst new file mode 100644 index 00000000..913e249a --- /dev/null +++ b/docs/source/api_reference/experiments_integrations.rst @@ -0,0 +1,57 @@ +.. _experiments_integrations_ref: + +Framework Integration Experiments +================================== + +The :mod:`hyperactive.experiment.integrations` module contains experiment classes +for integration with machine learning frameworks. + +These experiments provide seamless hyperparameter optimization for scikit-learn, +sktime, skpro, and PyTorch Lightning models. + +Scikit-Learn +------------ + +Cross-validation experiments for scikit-learn estimators. + +.. currentmodule:: hyperactive.experiment.integrations + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + SklearnCvExperiment + +Sktime - Time Series +-------------------- + +Experiments for sktime time series models. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + SktimeClassificationExperiment + SktimeForecastingExperiment + +Skpro - Probabilistic Prediction +--------------------------------- + +Experiments for skpro probabilistic regression models. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + SkproProbaRegExperiment + +PyTorch Lightning +----------------- + +Experiments for PyTorch Lightning models. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + TorchExperiment diff --git a/docs/source/api_reference/optimizers.rst b/docs/source/api_reference/optimizers.rst new file mode 100644 index 00000000..ab9f0224 --- /dev/null +++ b/docs/source/api_reference/optimizers.rst @@ -0,0 +1,128 @@ +.. _optimizers_ref: + +Optimizers +========== + +The :mod:`hyperactive.opt` module contains optimization algorithms for hyperparameter tuning. 
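+
+As a quick orientation, a minimal usage sketch of the shared interface described
+below (a hedged example: the toy objective and search-space values are
+illustrative, mirroring the pattern used in the FAQ):
+
+.. code-block:: python
+
+    import numpy as np
+
+    from hyperactive.opt import HillClimbing
+
+    def objective(params):
+        # toy objective: Hyperactive maximizes, so the optimum is x = 0
+        return -(params["x"] ** 2)
+
+    search_space = {"x": np.linspace(-5, 5, 100)}
+
+    optimizer = HillClimbing(
+        search_space=search_space,
+        n_iter=50,
+        experiment=objective,
+    )
+    best_params = optimizer.solve()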
+ +All optimizers inherit from :class:`hyperactive.base.BaseOptimizer` and share the same interface: +the ``solve()`` method to run optimization, and configuration via the ``experiment`` and ``search_space`` parameters. + +Gradient-Free Optimizers (GFO) +------------------------------- + +These optimizers are based on the gradient-free-optimizers package and implement +various metaheuristic and numerical optimization algorithms. + +Local Search +~~~~~~~~~~~~ + +Local search algorithms that explore the neighborhood of the current position. + +.. currentmodule:: hyperactive.opt + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + HillClimbing + StochasticHillClimbing + RepulsingHillClimbing + RandomRestartHillClimbing + +Simulated Annealing +~~~~~~~~~~~~~~~~~~~ + +Probabilistic techniques for approximating the global optimum. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + SimulatedAnnealing + +Global Search +~~~~~~~~~~~~~ + +Random and systematic search strategies. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + RandomSearch + GridSearch + +Direct Methods +~~~~~~~~~~~~~~ + +Direct search methods that do not use gradients. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + DownhillSimplexOptimizer + PowellsMethod + PatternSearch + LipschitzOptimizer + DirectAlgorithm + +Population-Based +~~~~~~~~~~~~~~~~ + +Optimization algorithms that maintain and evolve populations of solutions. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + ParallelTempering + ParticleSwarmOptimizer + SpiralOptimization + GeneticAlgorithm + EvolutionStrategy + DifferentialEvolution + +Sequential Model-Based +~~~~~~~~~~~~~~~~~~~~~~ + +Algorithms that build surrogate models of the objective function. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + BayesianOptimizer + TreeStructuredParzenEstimators + ForestOptimizer + +Optuna-Based Optimizers +------------------------ + +These optimizers provide an interface to Optuna's optimization algorithms. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + TPEOptimizer + RandomOptimizer + CmaEsOptimizer + GPOptimizer + GridOptimizer + NSGAIIOptimizer + NSGAIIIOptimizer + QMCOptimizer + +Scikit-Learn Style +------------------- + +Optimizers with a scikit-learn compatible interface. + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + GridSearchSk + RandomSearchSk diff --git a/docs/source/api_reference/sklearn_integration.rst b/docs/source/api_reference/sklearn_integration.rst new file mode 100644 index 00000000..82e454de --- /dev/null +++ b/docs/source/api_reference/sklearn_integration.rst @@ -0,0 +1,18 @@ +.. _sklearn_integration_ref: + +Scikit-Learn Integration +========================= + +The :mod:`hyperactive.integrations.sklearn` module provides scikit-learn compatible +meta-estimators for hyperparameter optimization. + +These classes follow the scikit-learn estimator API and can be used as drop-in replacements +for scikit-learn's GridSearchCV and RandomizedSearchCV. + +.. currentmodule:: hyperactive.integrations.sklearn + +.. autosummary:: + :toctree: auto_generated/ + :template: class.rst + + OptCV diff --git a/docs/source/api_reference/utilities.rst b/docs/source/api_reference/utilities.rst new file mode 100644 index 00000000..c531e044 --- /dev/null +++ b/docs/source/api_reference/utilities.rst @@ -0,0 +1,18 @@ +.. 
_utilities_ref: + +Utilities +========= + +The :mod:`hyperactive.utils` module contains utility functions for working with +Hyperactive estimators. + +Estimator Validation +-------------------- + +.. currentmodule:: hyperactive.utils + +.. autosummary:: + :toctree: auto_generated/ + :template: function.rst + + check_estimator diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..de3e263e --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,435 @@ +#!/usr/bin/env python3 +"""Configuration file for the Sphinx documentation builder.""" + +import datetime +import os +import re +import sys +from pathlib import Path + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +ON_READTHEDOCS = os.environ.get("READTHEDOCS") == "True" +if not ON_READTHEDOCS: + sys.path.insert(0, os.path.abspath("../../src")) + +import hyperactive # noqa: E402 # must be after sys.path modification + +# -- Extract metadata from pyproject.toml ------------------------------------ +# This allows documentation to stay in sync with pyproject.toml automatically + + +def extract_pyproject_metadata(): + """Extract metadata from pyproject.toml for use in documentation.""" + pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml" + + metadata = { + "python_versions": [], + "min_python": "3.10", + "dependencies": [], + "version": hyperactive.__version__, + } + + if pyproject_path.exists(): + content = pyproject_path.read_text() + + # Extract Python versions from classifiers + # Pattern: "Programming Language :: Python :: 3.XX" + py_version_pattern = r'"Programming Language :: Python :: (3\.\d+)"' + versions = re.findall(py_version_pattern, content) + if versions: + metadata["python_versions"] = sorted(set(versions)) + + # Extract requires-python + requires_python_match = re.search(r'requires-python\s*=\s*"([^"]+)"', content) + if requires_python_match: + req = requires_python_match.group(1) + # Extract minimum version from ">=3.10" or similar + min_match = re.search(r">=\s*([\d.]+)", req) + if min_match: + metadata["min_python"] = min_match.group(1) + + # Extract core dependencies + deps_match = re.search(r"dependencies\s*=\s*\[(.*?)\]", content, re.DOTALL) + if deps_match: + deps_text = deps_match.group(1) + # Extract package names (first word before any version specifier) + dep_names = re.findall(r'"([a-zA-Z][a-zA-Z0-9_-]*)', deps_text) + metadata["dependencies"] = dep_names + + return metadata + + +# Extract metadata once at configuration time +PYPROJECT_METADATA = extract_pyproject_metadata() + +# Build Python version range string from metadata +_py_versions = PYPROJECT_METADATA["python_versions"] +if _py_versions: + _py_version_range = f"{_py_versions[0]} - {_py_versions[-1]}" +else: + _py_version_range = "3.10+" + +# -- Project information ----------------------------------------------------- +current_year = datetime.datetime.now().year +project = "hyperactive" +project_copyright = f"2019 - {current_year} (MIT License)" +author = "Simon Blanke" + +# The full version, including alpha/beta/rc tags +CURRENT_VERSION = f"v{hyperactive.__version__}" + +# If on readthedocs, and we're building the latest version, update tag to generate +# correct links in notebooks +if ON_READTHEDOCS: + READTHEDOCS_VERSION = 
os.environ.get("READTHEDOCS_VERSION") + if READTHEDOCS_VERSION == "latest": + CURRENT_VERSION = "main" + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.autosectionlabel", + "numpydoc", + "sphinx.ext.intersphinx", + "sphinx.ext.linkcode", # link to GitHub source code via linkcode_resolve() + "myst_parser", + "sphinx_copybutton", + "sphinx_design", + "sphinx_issues", + "sphinx.ext.doctest", +] + +# Recommended by sphinx_design when using the MyST Parser +myst_enable_extensions = ["colon_fence"] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = { + ".rst": "restructuredtext", + ".md": "markdown", +} + +# The main toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = "en" + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [ + "_build", + ".ipynb_checkpoints", + "Thumbs.db", + ".DS_Store", +] + +add_module_names = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# see http://stackoverflow.com/q/12206334/562769 +numpydoc_show_class_members = True +# this is needed for some reason... +# see https://github.com/numpy/numpydoc/issues/69 +numpydoc_class_members_toctree = False + +# https://numpydoc.readthedocs.io/en/latest/validation.html#built-in-validation-checks +# Let's turn off the check for building but keep it in pre-commit hooks +numpydoc_validation_checks = set() + +# generate autosummary even if no references +autosummary_generate = True + +# Members and inherited-members default to showing methods and attributes from a +# class or those inherited. +# Member-order orders the documentation in the order of how the members are defined in +# the source code. +autodoc_default_options = { + "members": True, + "inherited-members": True, + "member-order": "bysource", +} + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = False + +# Suppress warnings +suppress_warnings = [ + "myst.mathjax", + "docutils", + "toc.not_included", + "autodoc.import_object", + "autosectionlabel", + "ref", +] + +show_warning_types = True + +# Link to GitHub repo for github_issues extension +issues_github_path = "SimonBlanke/Hyperactive" + + +def linkcode_resolve(domain, info): + """Return URL to source code corresponding. 
+ + Parameters + ---------- + domain : str + info : dict + + Returns + ------- + url : str + """ + + def find_source(): + # try to find the file and line number, based on code from numpy: + # https://github.com/numpy/numpy/blob/main/doc/source/conf.py#L286 + obj = sys.modules[info["module"]] + for part in info["fullname"].split("."): + obj = getattr(obj, part) + import inspect + import os + + fn = inspect.getsourcefile(obj) + fn = os.path.relpath(fn, start=os.path.dirname(hyperactive.__file__)) + source, lineno = inspect.getsourcelines(obj) + return fn, lineno, lineno + len(source) - 1 + + if domain != "py" or not info["module"]: + return None + try: + filename = "src/hyperactive/%s#L%d-L%d" % find_source() + except Exception: + filename = info["module"].replace(".", "/") + ".py" + return ( + f"https://github.com/SimonBlanke/Hyperactive/blob/{CURRENT_VERSION}/{filename}" + ) + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. + +html_theme = "pydata_sphinx_theme" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. + +html_theme_options = { + "logo": { + "image_light": "_static/images/navbar_logo.svg", + "image_dark": "_static/images/navbar_logo_dark.svg", + }, + "icon_links": [ + { + "name": "GitHub", + "url": "https://github.com/SimonBlanke/Hyperactive", + "icon": "fab fa-github", + }, + { + "name": "Star on GitHub", + "url": "https://github.com/SimonBlanke/Hyperactive/stargazers", + "icon": "fa-regular fa-star", + }, + ], + "show_prev_next": False, + "use_edit_page_button": False, + "navbar_start": ["navbar-logo"], + "navbar_center": ["navbar-nav"], + "navbar_end": ["theme-switcher", "navbar-icon-links"], + "show_toc_level": 2, + "secondary_sidebar_items": ["page-toc", "sourcelink"], +} + +html_title = "Hyperactive" +html_context = { + "github_user": "SimonBlanke", + "github_repo": "Hyperactive", + "github_version": "master", + "doc_path": "auto-doc/source/", + "python_version_range": _py_version_range, +} + +html_sidebars = { + "**": ["sidebar-nav-bs.html"], + "index": [], + "get_started": [], + "installation": [], + "search": [], +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] +html_css_files = ["css/custom.css"] + +html_show_sourcelink = False + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = "hyperactivedoc" + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + ( + master_doc, + "hyperactive.tex", + "Hyperactive Documentation", + "Simon Blanke", + "manual", + ), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "hyperactive", "Hyperactive Documentation", [author], 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "hyperactive", + "Hyperactive Documentation", + author, + "hyperactive", + "An optimization and data collection toolbox for convenient and fast prototyping.", + "Miscellaneous", + ), +] + + +def setup(app): + """Set up sphinx builder. + + Parameters + ---------- + app : Sphinx application object + """ + + def adds(pth): + print("Adding stylesheet: %s" % pth) # noqa: T201, T001 + app.add_css_file(pth) + + adds("fields.css") # for parameters, etc. + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for intersphinx extension --------------------------------------- + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": (f"https://docs.python.org/{sys.version_info.major}", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/", None), + "matplotlib": ("https://matplotlib.org/stable/", None), + "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), + "joblib": ("https://joblib.readthedocs.io/en/stable/", None), + "scikit-learn": ("https://scikit-learn.org/stable/", None), +} + +# -- Options for todo extension ---------------------------------------------- +todo_include_todos = False + +copybutton_prompt_text = r">>> |\.\.\. |\$ " +copybutton_prompt_is_regexp = True +copybutton_line_continuation_character = "\\" + +# -- RST Epilog: Make metadata available as substitutions in RST files ------- +# These can be used as |variable_name| in RST files + +# Build additional Python versions formatting for RST +if _py_versions: + _py_versions_inline = ", ".join(_py_versions) +else: + _py_versions_inline = "3.10+" + + +# -- Count algorithms and integrations dynamically ---------------------------- +def count_from_all_list(module_path: str) -> int: + """Count items in __all__ list of a Python module file.""" + import ast + + file_path = Path(__file__).parent.parent.parent / "src" / module_path + if not file_path.exists(): + return 0 + + try: + tree = ast.parse(file_path.read_text()) + for node in ast.walk(tree): + if isinstance(node, ast.Assign): + for target in node.targets: + if isinstance(target, ast.Name) and target.id == "__all__": + if isinstance(node.value, ast.List): + return len(node.value.elts) + except Exception: + pass + return 0 + + +# Count algorithms from opt/__init__.py +_n_algorithms = count_from_all_list("hyperactive/opt/__init__.py") + +# Count integrations from experiment/integrations/__init__.py +_n_integrations = count_from_all_list("hyperactive/experiment/integrations/__init__.py") + +# Backends are conceptual (GFO, Optuna, sklearn) - hardcoded +_n_backends = 3 + + +rst_epilog = f""" +.. |version| replace:: {PYPROJECT_METADATA["version"]} +.. |min_python| replace:: {PYPROJECT_METADATA["min_python"]} +.. 
|python_versions_list| replace:: {_py_versions_inline} +.. |python_version_range| replace:: {_py_version_range} +.. |current_year| replace:: {current_year} +.. |n_algorithms| replace:: {_n_algorithms} +.. |n_backends| replace:: {_n_backends} +.. |n_integrations| replace:: {_n_integrations} +""" diff --git a/docs/source/examples.rst b/docs/source/examples.rst new file mode 100644 index 00000000..c8b88c45 --- /dev/null +++ b/docs/source/examples.rst @@ -0,0 +1,83 @@ +.. _examples: + +======== +Examples +======== + +This section provides a collection of examples demonstrating Hyperactive's capabilities. +All examples are available in the +`examples directory `_ +on GitHub. + +.. toctree:: + :maxdepth: 1 + + examples/general + examples/local_search + examples/global_search + examples/population_based + examples/sequential_model_based + examples/optuna_backend + examples/sklearn_backend + examples/integrations + examples/other + examples/interactive_tutorial + + +Overview +-------- + +Hyperactive provides examples for all optimization algorithms and integration +patterns. The examples are organized by algorithm category: + + +Gradient-Free Optimizers (GFO) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:ref:`examples_general` + Basic examples showing custom function optimization and sklearn model tuning. + +:ref:`examples_local_search` + Hill Climbing, Simulated Annealing, Downhill Simplex, and other local + search methods that explore by making incremental moves. + +:ref:`examples_global_search` + Random Search, Grid Search, Powell's Method, and other algorithms that + explore the search space more broadly. + +:ref:`examples_population_based` + Particle Swarm, Genetic Algorithm, Evolution Strategy, and other + nature-inspired population methods. + +:ref:`examples_sequential_model_based` + Bayesian Optimization, Tree-Parzen Estimators, and other model-based + methods that learn from previous evaluations. + + +Backend Examples +^^^^^^^^^^^^^^^^ + +:ref:`examples_optuna_backend` + Examples using Optuna's samplers including TPE, CMA-ES, NSGA-II/III, + and Gaussian Process optimization. + +:ref:`examples_sklearn_backend` + Scikit-learn compatible interfaces as drop-in replacements for + GridSearchCV and RandomizedSearchCV. + + +Integration Examples +^^^^^^^^^^^^^^^^^^^^ + +:ref:`examples_integrations` + Time series optimization with sktime and other framework integrations. + + +Advanced Topics +^^^^^^^^^^^^^^^ + +:ref:`examples_other` + Advanced usage patterns including warm starting and optimizer comparison. + +:ref:`examples_interactive_tutorial` + Comprehensive Jupyter notebook tutorial covering all Hyperactive features. diff --git a/docs/source/examples/general.rst b/docs/source/examples/general.rst new file mode 100644 index 00000000..113171b8 --- /dev/null +++ b/docs/source/examples/general.rst @@ -0,0 +1,47 @@ +.. _examples_general: + +================ +General Examples +================ + +These examples demonstrate Hyperactive's core functionality with simple, +illustrative use cases. + + +Running Examples +---------------- + +All examples are available in the +`examples directory `_ +on GitHub. You can run any example directly: + +.. code-block:: bash + + # Clone the repository + git clone https://github.com/SimonBlanke/Hyperactive.git + cd Hyperactive/examples + + # Run an example + python gfo/hill_climbing_example.py + + +Custom Function Optimization +---------------------------- + +The simplest use case: optimizing a mathematical function. + +.. 
literalinclude:: ../_snippets/examples/basic_examples.py + :language: python + :start-after: # [start:custom_function] + :end-before: # [end:custom_function] + + +Scikit-learn Model Tuning +------------------------- + +Hyperparameter optimization for machine learning models. + +.. literalinclude:: ../_snippets/examples/basic_examples.py + :language: python + :start-after: # [start:sklearn_tuning] + :end-before: # [end:sklearn_tuning] diff --git a/docs/source/examples/global_search.rst b/docs/source/examples/global_search.rst new file mode 100644 index 00000000..d50ec5d5 --- /dev/null +++ b/docs/source/examples/global_search.rst @@ -0,0 +1,44 @@ +.. _examples_global_search: + +======================== +Global Search Algorithms +======================== + +Global search algorithms explore the search space more broadly, using +randomization or systematic patterns to avoid getting trapped in local optima. + + +Algorithm Examples +------------------ + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Algorithm + - Example + * - Random Search + - `random_search_example.py `_ + * - Grid Search + - `grid_search_example.py `_ + * - Random Restart Hill Climbing + - `random_restart_hill_climbing_example.py `_ + * - Stochastic Hill Climbing + - `stochastic_hill_climbing_example.py `_ + * - Powell's Method + - `powells_method_example.py `_ + * - Pattern Search + - `pattern_search_example.py `_ + + +When to Use Global Search +------------------------- + +Global search algorithms are best suited for: + +- **Multimodal search spaces** with multiple local optima +- **Initial exploration** before fine-tuning with local search +- **Unknown search spaces** where the landscape is not well understood +- **Baseline comparisons** (especially Random Search) + +See :ref:`user_guide_optimizers` for detailed algorithm descriptions. diff --git a/docs/source/examples/integrations.rst b/docs/source/examples/integrations.rst new file mode 100644 index 00000000..2fff131c --- /dev/null +++ b/docs/source/examples/integrations.rst @@ -0,0 +1,49 @@ +.. _examples_integrations: + +============ +Integrations +============ + +Hyperactive integrates with popular machine learning frameworks beyond +scikit-learn, including time series libraries. + + +Sktime Integration +------------------ + +For time series forecasting and classification with sktime: + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Use Case + - Example + * - Time Series Forecasting + - `sktime_forecasting_example.py `_ + * - Time Series Classification + - `sktime_tsc_example.py `_ + +.. note:: + + Sktime integration requires additional dependencies: + + .. code-block:: bash + + pip install hyperactive[sktime-integration] + + +Installing Extras +----------------- + +Install integration extras as needed: + +.. code-block:: bash + + # Sktime/skpro for time series + pip install hyperactive[sktime-integration] + + # All extras including PyTorch Lightning + pip install hyperactive[all_extras] + +See :ref:`user_guide_integrations` for complete integration documentation. diff --git a/docs/source/examples/interactive_tutorial.rst b/docs/source/examples/interactive_tutorial.rst new file mode 100644 index 00000000..b4e63e75 --- /dev/null +++ b/docs/source/examples/interactive_tutorial.rst @@ -0,0 +1,50 @@ +.. _examples_interactive_tutorial: + +==================== +Interactive Tutorial +==================== + +For hands-on learning, we provide a comprehensive Jupyter notebook tutorial +that covers all aspects of Hyperactive. 
+ + +Tutorial Notebook +----------------- + +`Hyperactive Tutorial Notebook `_ + +This interactive notebook covers: + +- **Basic optimization concepts** — Understanding search spaces and objective functions +- **All optimizer categories** — Hands-on examples with each algorithm type +- **Real-world ML examples** — Practical hyperparameter optimization workflows +- **Best practices and tips** — Common pitfalls and how to avoid them + + +Running the Tutorial +-------------------- + +You can run the tutorial locally: + +.. code-block:: bash + + # Clone the tutorial repository + git clone https://github.com/SimonBlanke/hyperactive-tutorial.git + cd hyperactive-tutorial + + # Install dependencies + pip install -r requirements.txt + + # Launch Jupyter + jupyter notebook notebooks/hyperactive_tutorial.ipynb + +Or view it directly on `nbviewer `_ +without any installation. + + +Additional Resources +-------------------- + +- `Hyperactive GitHub Repository `_ +- `Gradient-Free-Optimizers `_ — The underlying optimization library +- :ref:`user_guide` — Detailed documentation of all features diff --git a/docs/source/examples/local_search.rst b/docs/source/examples/local_search.rst new file mode 100644 index 00000000..d4c9ad7a --- /dev/null +++ b/docs/source/examples/local_search.rst @@ -0,0 +1,41 @@ +.. _examples_local_search: + +======================= +Local Search Algorithms +======================= + +Local search algorithms explore the search space by making small, incremental +moves from the current position. They are efficient for finding local optima +but may get stuck without escaping mechanisms. + + +Algorithm Examples +------------------ + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Algorithm + - Example + * - Hill Climbing + - `hill_climbing_example.py `_ + * - Repulsing Hill Climbing + - `repulsing_hill_climbing_example.py `_ + * - Simulated Annealing + - `simulated_annealing_example.py `_ + * - Downhill Simplex + - `downhill_simplex_example.py `_ + + +When to Use Local Search +------------------------ + +Local search algorithms are best suited for: + +- **Smooth search spaces** where nearby points have similar scores +- **Fine-tuning** around a known good region +- **Fast convergence** when a good starting point is available +- **Limited computational budget** where few evaluations are possible + +See :ref:`user_guide_optimizers` for detailed algorithm descriptions. diff --git a/docs/source/examples/optuna_backend.rst b/docs/source/examples/optuna_backend.rst new file mode 100644 index 00000000..3a8067b7 --- /dev/null +++ b/docs/source/examples/optuna_backend.rst @@ -0,0 +1,56 @@ +.. _examples_optuna_backend: + +============== +Optuna Backend +============== + +Hyperactive provides wrappers for Optuna's optimization algorithms, allowing +you to use Optuna's powerful samplers with Hyperactive's interface. + +.. note:: + + Optuna must be installed separately: + + .. code-block:: bash + + pip install optuna + # or + pip install hyperactive[all_extras] + + +Sampler Examples +---------------- + +.. 
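note::
+
+    The wrappers below follow the same shared optimizer interface as the GFO
+    backends. A minimal, hedged sketch (mirroring the FAQ snippet; ``search_space``
+    and ``objective`` are assumed to be defined as in the general examples):
+
+    .. code-block:: python
+
+        from hyperactive.opt.optuna import TPEOptimizer
+
+        optimizer = TPEOptimizer(
+            search_space=search_space,
+            n_iter=100,
+            experiment=objective,
+        )
+        best_params = optimizer.solve()
+
+.. 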
list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Sampler + - Example + * - TPE (Tree-Parzen Estimator) + - `tpe_sampler_example.py `_ + * - CMA-ES + - `cmaes_sampler_example.py `_ + * - Gaussian Process + - `gp_sampler_example.py `_ + * - NSGA-II + - `nsga_ii_sampler_example.py `_ + * - NSGA-III + - `nsga_iii_sampler_example.py `_ + * - QMC (Quasi-Monte Carlo) + - `qmc_sampler_example.py `_ + * - Random + - `random_sampler_example.py `_ + * - Grid + - `grid_sampler_example.py `_ + + +When to Use Optuna Backend +-------------------------- + +The Optuna backend is useful when you need: + +- **Multi-objective optimization** (NSGA-II, NSGA-III) +- **Advanced sampling strategies** like CMA-ES or QMC +- **Optuna's pruning capabilities** for early stopping +- **Compatibility** with existing Optuna workflows diff --git a/docs/source/examples/other.rst b/docs/source/examples/other.rst new file mode 100644 index 00000000..ce96ca09 --- /dev/null +++ b/docs/source/examples/other.rst @@ -0,0 +1,46 @@ +.. _examples_other: + +================= +Advanced Examples +================= + +These examples demonstrate advanced Hyperactive features for more sophisticated +optimization workflows. + + +Warm Starting Optimization +-------------------------- + +Start optimization from known good points to accelerate convergence: + +.. literalinclude:: ../_snippets/examples/advanced_examples.py + :language: python + :start-after: # [start:warm_starting] + :end-before: # [end:warm_starting] + + +Comparing Optimizers +-------------------- + +Compare different optimization strategies on the same problem: + +.. literalinclude:: ../_snippets/examples/advanced_examples.py + :language: python + :start-after: # [start:comparing_optimizers] + :end-before: # [end:comparing_optimizers] + + +Tips for Advanced Usage +----------------------- + +**Warm Starting** + +- Use results from previous runs to seed new optimizations +- Helpful when iterating on model architecture or features +- Combine with local search for fine-tuning around known good points + +**Optimizer Comparison** + +- Always use the same ``random_state`` for reproducible comparisons +- Run multiple trials to account for optimizer randomness +- Consider both final score and convergence speed diff --git a/docs/source/examples/population_based.rst b/docs/source/examples/population_based.rst new file mode 100644 index 00000000..f115457f --- /dev/null +++ b/docs/source/examples/population_based.rst @@ -0,0 +1,45 @@ +.. _examples_population_based: + +=========================== +Population-Based Algorithms +=========================== + +Population-based algorithms maintain multiple candidate solutions simultaneously, +using mechanisms inspired by natural evolution or swarm behavior to explore +the search space efficiently. + + +Algorithm Examples +------------------ + +.. 
list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Algorithm + - Example + * - Particle Swarm + - `particle_swarm_example.py `_ + * - Genetic Algorithm + - `genetic_algorithm_example.py `_ + * - Evolution Strategy + - `evolution_strategy_example.py `_ + * - Differential Evolution + - `differential_evolution_example.py `_ + * - Parallel Tempering + - `parallel_tempering_example.py `_ + * - Spiral Optimization + - `spiral_optimization_example.py `_ + + +When to Use Population-Based Methods +------------------------------------ + +Population-based algorithms are best suited for: + +- **Complex, multimodal landscapes** with many local optima +- **Parallelizable evaluations** where multiple candidates can be evaluated simultaneously +- **Robust optimization** where diversity helps avoid premature convergence +- **Large search spaces** requiring extensive exploration + +See :ref:`user_guide_optimizers` for detailed algorithm descriptions. diff --git a/docs/source/examples/sequential_model_based.rst b/docs/source/examples/sequential_model_based.rst new file mode 100644 index 00000000..bde4fca2 --- /dev/null +++ b/docs/source/examples/sequential_model_based.rst @@ -0,0 +1,43 @@ +.. _examples_sequential_model_based: + +================================= +Sequential Model-Based Algorithms +================================= + +Sequential model-based optimization (SMBO) algorithms build a surrogate model +of the objective function to guide the search. They are particularly effective +when function evaluations are expensive. + + +Algorithm Examples +------------------ + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Algorithm + - Example + * - Bayesian Optimization + - `bayesian_optimization_example.py `_ + * - Tree-Parzen Estimators + - `tree_structured_parzen_estimators_example.py `_ + * - Forest Optimizer + - `forest_optimizer_example.py `_ + * - Lipschitz Optimizer + - `lipschitz_optimizer_example.py `_ + * - DIRECT Algorithm + - `direct_algorithm_example.py `_ + + +When to Use Model-Based Methods +------------------------------- + +Sequential model-based algorithms are best suited for: + +- **Expensive objective functions** (e.g., training neural networks, simulations) +- **Limited evaluation budgets** where each evaluation counts +- **Smooth, continuous search spaces** where surrogate models work well +- **Hyperparameter optimization** for machine learning models + +See :ref:`user_guide_optimizers` for detailed algorithm descriptions. diff --git a/docs/source/examples/sklearn_backend.rst b/docs/source/examples/sklearn_backend.rst new file mode 100644 index 00000000..b74f0623 --- /dev/null +++ b/docs/source/examples/sklearn_backend.rst @@ -0,0 +1,57 @@ +.. _examples_sklearn_backend: + +=============== +Sklearn Backend +=============== + +Hyperactive provides scikit-learn compatible interfaces that work as drop-in +replacements for ``GridSearchCV`` and ``RandomizedSearchCV``. + + +Example Files +------------- + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Use Case + - Example + * - Classification with OptCV + - `sklearn_classif_example.py `_ + * - Grid Search + - `grid_search_example.py `_ + * - Random Search + - `random_search_example.py `_ + + +Usage Overview +-------------- + +Hyperactive's sklearn-compatible classes follow the familiar fit/predict pattern: + +.. 
code-block:: python
+
+    from hyperactive.integrations.sklearn import OptCV
+    from hyperactive.opt import GridSearchSk
+    from sklearn.ensemble import RandomForestClassifier
+
+    # Define search space
+    param_grid = {
+        "n_estimators": [50, 100, 200],
+        "max_depth": [5, 10, 15, None],
+    }
+
+    # Create search object: OptCV pairs an estimator with an optimizer
+    # (GridSearchSk is the sklearn-style grid search from hyperactive.opt)
+    search = OptCV(
+        estimator=RandomForestClassifier(),
+        optimizer=GridSearchSk(param_grid=param_grid),
+    )
+
+    # Fit like any sklearn estimator
+    search.fit(X_train, y_train)
+
+    # Access best parameters
+    print(search.best_params_)
+
+The sketch above uses ``OptCV``, the class documented in the API reference;
+constructor argument names may differ between versions, so treat the example
+files listed above as the authoritative, runnable versions.
+See :ref:`user_guide_integrations` for complete documentation.
diff --git a/docs/source/faq.rst b/docs/source/faq.rst
new file mode 100644
index 00000000..ed5360a9
--- /dev/null
+++ b/docs/source/faq.rst
@@ -0,0 +1,40 @@
+.. _faq:
+
+==========================
+Frequently Asked Questions
+==========================
+
+This section answers common questions about Hyperactive. For migration from v4,
+see the :ref:`user_guide_migration`.
+
+.. toctree::
+   :maxdepth: 1
+
+   faq/getting_started
+   faq/search_space
+   faq/common_issues
+   faq/advanced_usage
+   faq/integrations
+   faq/getting_help
+
+
+Overview
+--------
+
+:ref:`faq_getting_started`
+   Choosing optimizers, iteration counts, and understanding maximization.
+
+:ref:`faq_search_space`
+   Defining continuous, discrete, and mixed parameter spaces.
+
+:ref:`faq_common_issues`
+   Slow optimization, reproducibility, handling errors.
+
+:ref:`faq_advanced_usage`
+   Parallel execution, callbacks, parameter constraints.
+
+:ref:`faq_integrations`
+   Using Hyperactive with PyTorch, XGBoost, and other frameworks.
+
+:ref:`faq_getting_help`
+   Where to report bugs and get support.
diff --git a/docs/source/faq/advanced_usage.rst b/docs/source/faq/advanced_usage.rst
new file mode 100644
index 00000000..2199cc38
--- /dev/null
+++ b/docs/source/faq/advanced_usage.rst
@@ -0,0 +1,69 @@
+.. _faq_advanced_usage:
+
+==============
+Advanced Usage
+==============
+
+Can I run optimizations in parallel?
+------------------------------------
+
+Hyperactive v5 currently runs one optimizer instance at a time.
+For parallel evaluation of candidates, consider the Optuna-backend
+optimizers, which support parallel trials:
+
+.. code-block:: python
+
+    from hyperactive.opt.optuna import TPEOptimizer
+
+    optimizer = TPEOptimizer(
+        search_space=search_space,
+        n_iter=100,
+        experiment=objective,
+        # Optuna handles parallelization
+    )
+
+
+Can I save and resume optimization?
+-----------------------------------
+
+This feature is planned but not yet available in v5. As a workaround,
+you can log results during optimization and use them as initial points
+for a new run.
+
+
+Are callbacks supported?
+------------------------
+
+User-defined callbacks during optimization are not currently supported in v5.
+The Optuna backend has internal early-stopping callbacks, but there's no
+general callback interface for tracking progress or modifying behavior during
+optimization.
+
+For progress monitoring, you can add logging inside your objective function:
+
+.. code-block:: python
+
+    iteration = 0
+
+    def objective(params):
+        global iteration
+        iteration += 1
+        score = evaluate_model(params)
+        print(f"Iteration {iteration}: score={score:.4f}")
+        return score
+
+
+How do I add constraints between parameters?
+--------------------------------------------
+
+Handle constraints in your objective function by returning a poor score
+for invalid combinations:
+
+.. 
code-block:: python + + def objective(params): + # Constraint: min_samples_split must be >= min_samples_leaf + if params["min_samples_split"] < params["min_samples_leaf"]: + return -np.inf # Invalid configuration + + return evaluate_model(params) diff --git a/docs/source/faq/common_issues.rst b/docs/source/faq/common_issues.rst new file mode 100644 index 00000000..e95f3466 --- /dev/null +++ b/docs/source/faq/common_issues.rst @@ -0,0 +1,72 @@ +.. _faq_common_issues: + +============= +Common Issues +============= + +Why is my optimization slow? +---------------------------- + +**Slow objective function**: The optimizer only controls search strategy. +If each evaluation takes a long time, consider: + +- Reducing cross-validation folds +- Using a subset of training data for tuning +- Simplifying your model during search + +**Large search space**: More combinations require more iterations. +Consider reducing parameter granularity or using smarter optimizers +like Bayesian optimization. + +**Too many iterations**: Start with fewer iterations and increase +if needed. + + +Why does my score vary between runs? +------------------------------------ + +Optimization algorithms are stochastic. To get reproducible results, +set a random seed: + +.. code-block:: python + + optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, + random_state=42, # Set seed for reproducibility + ) + + +My objective function returns NaN or raises exceptions +------------------------------------------------------ + +Handle invalid configurations in your objective function: + +.. code-block:: python + + def objective(params): + try: + score = evaluate_model(params) + if np.isnan(score): + return -np.inf # Return worst possible score + return score + except Exception: + return -np.inf # Return worst possible score on error + + +How do I see what parameters were tried? +---------------------------------------- + +Access the search history after optimization: + +.. code-block:: python + + best_params = optimizer.solve() + + # Access results + print(f"Best parameters: {optimizer.best_params_}") + print(f"Best score: {optimizer.best_score_}") + + # Full search history (if available) + # Check optimizer attributes for search_data or similar diff --git a/docs/source/faq/getting_help.rst b/docs/source/faq/getting_help.rst new file mode 100644 index 00000000..acb918cb --- /dev/null +++ b/docs/source/faq/getting_help.rst @@ -0,0 +1,28 @@ +.. _faq_getting_help: + +============ +Getting Help +============ + +Where can I report bugs or request features? +-------------------------------------------- + +Open an issue on `GitHub `_. + + +Where can I get help? +--------------------- + +- Check the :ref:`examples` for code samples +- Read the :ref:`user_guide` for detailed explanations +- Join the `Discord `_ community +- Search or ask on `GitHub Discussions `_ + + +Where is the documentation for older versions? +---------------------------------------------- + +Documentation for Hyperactive v4 is available at: +`Legacy Documentation (v4) `_ + +If you're migrating from v4 to v5, see the :ref:`user_guide_migration`. diff --git a/docs/source/faq/getting_started.rst b/docs/source/faq/getting_started.rst new file mode 100644 index 00000000..fbb3f01d --- /dev/null +++ b/docs/source/faq/getting_started.rst @@ -0,0 +1,53 @@ +.. _faq_getting_started: + +=============== +Getting Started +=============== + +Which optimizer should I use? 
+-----------------------------
+
+For most problems, start with one of these recommendations:
+
+**Small search spaces (<100 combinations)**
+    Use :class:`~hyperactive.opt.gfo.GridSearch` to exhaustively evaluate all options.
+
+**General-purpose optimization**
+    :class:`~hyperactive.opt.gfo.BayesianOptimizer` works well for expensive
+    objective functions where you want to minimize the number of evaluations.
+
+**Fast, simple problems**
+    :class:`~hyperactive.opt.gfo.HillClimbing` or
+    :class:`~hyperactive.opt.gfo.RandomSearch` are good starting points.
+
+**High-dimensional spaces**
+    Population-based methods like :class:`~hyperactive.opt.gfo.ParticleSwarmOptimizer`
+    or :class:`~hyperactive.opt.gfo.EvolutionStrategy` handle many
+    parameters well.
+
+See :ref:`user_guide_optimizers` for detailed guidance on choosing optimizers.
+
+
+How many iterations do I need?
+------------------------------
+
+This depends on the size of your search space and the cost of your objective function:
+
+- **Rule of thumb**: Start with ``n_iter = 10 * number_of_parameters``
+- **Expensive functions**: Use fewer iterations with Bayesian optimization
+- **Fast functions**: Use more iterations with simpler optimizers
+
+You can monitor progress and stop early if the score plateaus.
+
+
+Does Hyperactive minimize or maximize?
+--------------------------------------
+
+**Hyperactive maximizes** the objective function. If you want to minimize,
+return the negative of your metric:
+
+.. code-block:: python
+
+    def objective(params):
+        error = compute_error(params)
+        return -error  # Negate to minimize
diff --git a/docs/source/faq/integrations.rst b/docs/source/faq/integrations.rst
new file mode 100644
index 00000000..c6d693b6
--- /dev/null
+++ b/docs/source/faq/integrations.rst
@@ -0,0 +1,64 @@
+.. _faq_integrations:
+
+============
+Integrations
+============
+
+Can I use Hyperactive with PyTorch (not Lightning)?
+---------------------------------------------------
+
+Yes, create a custom objective function:
+
+.. code-block:: python
+
+    import torch
+
+    def objective(params):
+        # MyPyTorchModel, train_model, and evaluate_model are your own code
+        model = MyPyTorchModel(
+            hidden_size=params["hidden_size"],
+            dropout=params["dropout"],
+        )
+        # Train and evaluate your model
+        train_model(model, train_loader)
+        accuracy = evaluate_model(model, val_loader)
+        return accuracy
+
+
+How does Hyperactive compare to Optuna?
+---------------------------------------
+
+**Hyperactive with the native GFO backend**:
+
+- Simple, unified API
+- Wide variety of optimization algorithms
+- Great for hyperparameter tuning
+
+**Hyperactive with the Optuna backend**:
+
+- Access Optuna's algorithms through Hyperactive's interface
+- Combine the strengths of both libraries
+
+**Pure Optuna**:
+
+- More features (pruning, distributed, database storage)
+- Larger community and ecosystem
+- More configuration options
+
+Choose based on your needs: Hyperactive for simplicity, Optuna for
+advanced features.
+
+
+Can I use Hyperactive with other ML frameworks?
+-----------------------------------------------
+
+Yes, any framework works with custom objective functions:
+
+.. code-block:: python
+
+    # XGBoost example
+    import xgboost as xgb
+    from sklearn.model_selection import cross_val_score
+
+    def objective(params):
+        model = xgb.XGBClassifier(**params)
+        scores = cross_val_score(model, X, y, cv=3)
+        return scores.mean()
diff --git a/docs/source/faq/search_space.rst b/docs/source/faq/search_space.rst
new file mode 100644
index 00000000..eefccc36
--- /dev/null
+++ b/docs/source/faq/search_space.rst
@@ -0,0 +1,49 @@
+.. 
_faq_search_space: + +====================== +Search Space Questions +====================== + +How do I define a continuous search space? +------------------------------------------ + +Use NumPy to create arrays of values: + +.. code-block:: python + + import numpy as np + + search_space = { + "learning_rate": np.logspace(-4, -1, 50), # 0.0001 to 0.1 + "momentum": np.linspace(0.5, 0.99, 50), # 0.5 to 0.99 + } + +Hyperactive samples from these arrays, so finer granularity gives +more precision at the cost of a larger search space. + + +Can I mix discrete and continuous parameters? +--------------------------------------------- + +Yes, mix freely: + +.. code-block:: python + + search_space = { + "n_estimators": [10, 50, 100, 200], # Discrete + "max_depth": list(range(3, 20)), # Discrete range + "learning_rate": np.linspace(0.01, 0.3, 30), # Continuous + "algorithm": ["SAMME", "SAMME.R"], # Categorical + } + + +How do I include None as a parameter value? +------------------------------------------- + +Include ``None`` directly in your list: + +.. code-block:: python + + search_space = { + "max_depth": [None, 3, 5, 10, 20], + } diff --git a/docs/source/get_involved.rst b/docs/source/get_involved.rst new file mode 100644 index 00000000..a7ada9ac --- /dev/null +++ b/docs/source/get_involved.rst @@ -0,0 +1,123 @@ +.. _get_involved: + +============ +Get Involved +============ + +Hyperactive is an open-source project and we welcome contributions from the community! +There are many ways to get involved, whether you're a developer, researcher, or user. + +.. toctree:: + :maxdepth: 1 + + get_involved/contributing + get_involved/code_of_conduct + + +Ways to Contribute +------------------ + +Report Bugs +^^^^^^^^^^^ + +Found a bug? Please report it on GitHub: + +1. Search `existing issues `_ + to see if it's already reported. +2. If not, `open a new issue `_ + with: + + - A clear description of the problem + - Steps to reproduce the issue + - Expected vs actual behavior + - Your Python version and Hyperactive version + - Minimal code example that reproduces the issue + +Suggest Features +^^^^^^^^^^^^^^^^ + +Have an idea for a new feature or improvement? + +1. Open a `GitHub Discussion `_ + to discuss your idea with the community. +2. If there's consensus, create an issue or pull request. + +Contribute Code +^^^^^^^^^^^^^^^ + +Ready to contribute code? See the :ref:`contributing` guide for: + +- Setting up your development environment +- Coding standards and style guide +- How to submit pull requests +- Testing requirements + +Improve Documentation +^^^^^^^^^^^^^^^^^^^^^ + +Documentation improvements are always welcome: + +- Fix typos or unclear explanations +- Add examples and tutorials +- Improve API documentation +- Translate documentation + +Share Your Work +^^^^^^^^^^^^^^^ + +Using Hyperactive in your project? Share your experience: + +- Write a blog post or tutorial +- Present at a meetup or conference +- Share on social media +- Add your project to the examples + + +Community +--------- + +Connect with the Hyperactive community: + +Discord +^^^^^^^ + +Join our `Discord server `_ for: + +- Real-time discussions +- Questions and answers +- Announcements + +GitHub Discussions +^^^^^^^^^^^^^^^^^^ + +Use `GitHub Discussions `_ for: + +- Feature proposals +- Best practices +- Show and tell + +LinkedIn +^^^^^^^^ + +Follow the `German Center for Open Source AI `_ +for news and updates. + + +Support +------- + +Getting Help +^^^^^^^^^^^^ + +If you need help: + +1. 
Check the :ref:`user_guide` and :ref:`api_reference` +2. Search `existing issues `_ +3. Ask on `Discord `_ +4. Open a new issue with the "question" label + +Star the Project +^^^^^^^^^^^^^^^^ + +If you find Hyperactive useful, please `star it on GitHub `_! +Stars help increase visibility and attract more contributors. diff --git a/docs/source/get_involved/code_of_conduct.rst b/docs/source/get_involved/code_of_conduct.rst new file mode 100644 index 00000000..ce13223e --- /dev/null +++ b/docs/source/get_involved/code_of_conduct.rst @@ -0,0 +1,114 @@ +.. _code_of_conduct: + +=============== +Code of Conduct +=============== + +Hyperactive is committed to providing a welcoming and inclusive environment for everyone. +This Code of Conduct outlines our expectations for participant behavior and the +consequences for unacceptable behavior. + + +Our Pledge +---------- + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + + +Community Guidelines +-------------------- + +Expected Behavior +^^^^^^^^^^^^^^^^^ + +We expect all community members to: + +- **Be respectful**: Treat everyone with respect and consideration. +- **Be constructive**: Provide helpful feedback and suggestions. +- **Be inclusive**: Welcome newcomers and help them get started. +- **Be collaborative**: Work together toward common goals. +- **Be patient**: Remember that people have different skill levels and backgrounds. +- **Be professional**: Keep discussions focused on the project. + + +Unacceptable Behavior +^^^^^^^^^^^^^^^^^^^^^ + +The following behaviors are not acceptable: + +- Harassment, intimidation, or discrimination of any kind +- Offensive comments related to personal characteristics +- Sexual language or imagery in any community space +- Personal attacks or insults +- Trolling or deliberately inflammatory comments +- Publishing others' private information without permission +- Other conduct that could reasonably be considered inappropriate + + +Enforcement +----------- + +Reporting Issues +^^^^^^^^^^^^^^^^ + +If you experience or witness unacceptable behavior, please report it by: + +- **Email**: Contact Simon Blanke at simon.blanke@yahoo.com +- **GitHub**: Open a private issue or contact maintainers directly + +All complaints will be reviewed and investigated promptly and fairly. + + +Consequences +^^^^^^^^^^^^ + +Community leaders will determine appropriate action for violations, which may include: + +1. **Correction**: A private, written warning with clarity about the nature of + the violation and an explanation of why the behavior was inappropriate. + +2. **Warning**: A warning with consequences for continued behavior. No interaction + with the people involved for a specified period of time. + +3. **Temporary Ban**: A temporary ban from any sort of interaction or public + communication with the community for a specified period of time. + +4. **Permanent Ban**: A permanent ban from any sort of public interaction within + the community. 
+ + +Scope +----- + +This Code of Conduct applies within all community spaces, including: + +- GitHub repositories (issues, pull requests, discussions) +- Discord server +- Social media interactions +- In-person events + +It also applies when an individual is officially representing the community +in public spaces. + + +Attribution +----------- + +This Code of Conduct is adapted from the +`Contributor Covenant `_, version 2.0. + + +Questions +--------- + +If you have questions about this Code of Conduct, please contact: + +- **Email**: simon.blanke@yahoo.com +- **GitHub**: `Open a discussion `_ diff --git a/docs/source/get_involved/contributing.rst b/docs/source/get_involved/contributing.rst new file mode 100644 index 00000000..f8282f1c --- /dev/null +++ b/docs/source/get_involved/contributing.rst @@ -0,0 +1,250 @@ +.. _contributing: + +============ +Contributing +============ + +Thank you for your interest in contributing to Hyperactive! This guide will help +you get started with development and submit your contributions. + + +How to Contribute +----------------- + +Contribution Workflow +^^^^^^^^^^^^^^^^^^^^^ + +1. **Fork the repository** on GitHub +2. **Clone your fork** locally +3. **Create a branch** for your changes +4. **Make your changes** with tests +5. **Run the test suite** to ensure everything works +6. **Submit a pull request** for review + + +Types of Contributions +^^^^^^^^^^^^^^^^^^^^^^ + +We welcome many types of contributions: + +- **Bug fixes**: Fix issues and improve stability +- **New features**: Add new optimizers, experiments, or integrations +- **Documentation**: Improve guides, examples, and API docs +- **Tests**: Increase test coverage +- **Performance**: Optimize code for speed or memory + + +Development Setup +----------------- + +Prerequisites +^^^^^^^^^^^^^ + +- Python 3.10 or higher +- Git +- pip + +Setting Up Your Environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +1. Fork and clone the repository: + + .. code-block:: bash + + git clone https://github.com/YOUR-USERNAME/Hyperactive.git + cd Hyperactive + +2. Create a virtual environment: + + .. code-block:: bash + + python -m venv venv + source venv/bin/activate # On Windows: venv\Scripts\activate + +3. Install in development mode with test dependencies: + + .. code-block:: bash + + pip install -e ".[test,docs]" + +4. Verify the installation: + + .. code-block:: bash + + python -c "import hyperactive; print(hyperactive.__version__)" + + +Running Tests +^^^^^^^^^^^^^ + +Run the test suite to ensure everything works: + +.. code-block:: bash + + # Run all tests + pytest + + # Run with coverage report + pytest --cov=hyperactive + + # Run specific test file + pytest tests/test_specific.py + + # Run tests matching a pattern + pytest -k "test_hill_climbing" + + +Code Style +---------- + +Formatting +^^^^^^^^^^ + +Hyperactive uses `Black `_ for code formatting +and `Ruff `_ for linting: + +.. code-block:: bash + + # Format code + black src/hyperactive tests + + # Check linting + ruff check src/hyperactive tests + + # Auto-fix linting issues + ruff check --fix src/hyperactive tests + + +Docstrings +^^^^^^^^^^ + +Use NumPy-style docstrings for all public functions and classes: + +.. code-block:: python + + def my_function(param1, param2): + """Short description of the function. + + Longer description if needed. + + Parameters + ---------- + param1 : type + Description of param1. + param2 : type + Description of param2. + + Returns + ------- + type + Description of return value. 
+ + Examples + -------- + >>> my_function(1, 2) + 3 + """ + return param1 + param2 + + +Type Hints +^^^^^^^^^^ + +Add type hints to function signatures: + +.. code-block:: python + + def optimize( + self, + search_space: dict, + n_iter: int, + experiment: Callable, + ) -> dict: + ... + + +Submitting Changes +------------------ + +Creating a Pull Request +^^^^^^^^^^^^^^^^^^^^^^^ + +1. **Create a branch** for your changes: + + .. code-block:: bash + + git checkout -b feature/my-new-feature + +2. **Make your changes** and commit: + + .. code-block:: bash + + git add . + git commit -m "Add my new feature" + +3. **Push to your fork**: + + .. code-block:: bash + + git push origin feature/my-new-feature + +4. **Open a pull request** on GitHub from your branch to the main repository. + + +Pull Request Guidelines +^^^^^^^^^^^^^^^^^^^^^^^ + +- **Clear title**: Describe what the PR does +- **Description**: Explain the changes and motivation +- **Tests**: Include tests for new functionality +- **Documentation**: Update docs if needed +- **Small scope**: Keep PRs focused on one thing + + +Commit Messages +^^^^^^^^^^^^^^^ + +Write clear, descriptive commit messages: + +.. code-block:: text + + Add Bayesian optimizer warm start support + + - Add warm_start parameter to BayesianOptimizer + - Update documentation with usage examples + - Add tests for warm start functionality + + +Review Process +-------------- + +What to Expect +^^^^^^^^^^^^^^ + +1. **Automated checks**: CI will run tests and linting +2. **Code review**: Maintainers will review your code +3. **Feedback**: You may be asked to make changes +4. **Merge**: Once approved, your PR will be merged + + +Response Time +^^^^^^^^^^^^^ + +Maintainers are volunteers, so response times may vary. We aim to: + +- Acknowledge PRs within a few days +- Provide initial review within a week +- Merge approved PRs promptly + + +Getting Help +------------ + +If you need help: + +- Check existing `issues `_ + and `discussions `_ +- Ask on `Discord `_ +- Tag @SimonBlanke in your PR for attention + +Thank you for contributing to Hyperactive! diff --git a/docs/source/get_started.rst b/docs/source/get_started.rst new file mode 100644 index 00000000..1bb1ac1f --- /dev/null +++ b/docs/source/get_started.rst @@ -0,0 +1,111 @@ +.. _get_started: + +=========== +Get Started +=========== + +This guide will help you get up and running with Hyperactive in just a few minutes. +By the end, you'll understand the core concepts and be able to run your first optimization. + +Quick Start +----------- + +Hyperactive makes hyperparameter optimization simple. Here's a complete example +that optimizes a custom function: + +.. literalinclude:: _snippets/getting_started/quick_start.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + +That's it! Let's break down what happened: + +1. **Objective function**: A callable that takes a dictionary of parameters and returns a score. + Hyperactive **maximizes** this score by default. + +2. **Search space**: A dictionary mapping parameter names to their possible values. + Use NumPy arrays or lists to define discrete search spaces. + +3. **Optimizer**: Choose from 20+ optimization algorithms. Each optimizer explores the + search space differently to find optimal parameters. + + +First Steps +----------- + +Optimizing a Scikit-learn Model +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The most common use case is tuning machine learning models. Here's how to optimize +a Random Forest classifier: + +.. 
literalinclude:: _snippets/getting_started/sklearn_random_forest.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + + +Using the Sklearn Integration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For even simpler sklearn integration, use the ``OptCV`` wrapper that behaves like +scikit-learn's ``GridSearchCV``: + +.. literalinclude:: _snippets/getting_started/sklearn_optcv.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + + +Choosing an Optimizer +^^^^^^^^^^^^^^^^^^^^^ + +Hyperactive provides many optimization algorithms. Here are some common choices: + +.. list-table:: + :header-rows: 1 + :widths: 25 75 + + * - Optimizer + - Best For + * - ``HillClimbing`` + - Quick local optimization, good starting point + * - ``RandomSearch`` + - Exploring large search spaces, baseline comparison + * - ``BayesianOptimizer`` + - Expensive evaluations, smart exploration + * - ``ParticleSwarmOptimizer`` + - Multi-modal problems, avoiding local optima + * - ``GeneticAlgorithm`` + - Complex landscapes, combinatorial problems + +Example with Bayesian Optimization: + +.. literalinclude:: _snippets/getting_started/bayesian_optimizer.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + +.. literalinclude:: _snippets/getting_started/bayesian_optimizer.py + :language: python + :start-after: # [start:optimizer_usage] + :end-before: # [end:optimizer_usage] + + +Next Steps +---------- + +Now that you've seen the basics, explore these topics: + +- :ref:`installation` - Detailed installation instructions +- :ref:`user_guide` - In-depth tutorials and concepts +- :ref:`api_reference` - Complete API documentation +- :ref:`examples` - More code examples + +Key Concepts to Learn +^^^^^^^^^^^^^^^^^^^^^ + +1. **Experiments**: Abstractions that define *what* to optimize (see :ref:`user_guide_experiments`) +2. **Optimizers**: Algorithms that define *how* to optimize (see :ref:`user_guide_optimizers`) +3. **Search Spaces**: Define the parameter ranges to explore +4. **Integrations**: Built-in support for sklearn, sktime, and PyTorch (see :ref:`user_guide_integrations`) diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..23759cb3 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,498 @@ +.. _home: + +.. raw:: html + +
+   <!-- Hero banner -->
+   <h1>Hyperactive</h1>
+   <p>A unified interface for optimization algorithms and experiments</p>
+   <!-- Stats: 31 Algorithms | 3 Backends | 5 Integrations | 1 Unified API -->
+ + +Hyperactive provides a collection of optimization algorithms, accessible through a unified +experiment-based interface that separates optimization problems from algorithms. The library +provides native implementations of algorithms from the Gradient-Free-Optimizers package +alongside direct interfaces to Optuna and scikit-learn optimizers. + +.. raw:: html + + + + +---- + +.. _features: + +Features +================ + +What makes Hyperactive stand out for optimization tasks. + +.. grid:: 1 2 3 3 + :gutter: 4 + + .. grid-item-card:: + :class-card: feature-card + + **20+ Optimization Algorithms** + ^^^ + From Hill Climbing to Bayesian Optimization, + Particle Swarm, Genetic Algorithms, and more. + + +++ + :doc:`Local, global, population-based, and sequential methods ` + + .. grid-item-card:: + :class-card: feature-card + + **Direct ML Integration** + ^^^ + First-class support for scikit-learn, sktime, skpro, and PyTorch. + Tune models with minimal code changes. + + +++ + :doc:`Works with any estimator implementing fit/score ` + + .. grid-item-card:: + :class-card: feature-card + + **Experiment Abstraction** + ^^^ + Clean separation between *what* to optimize (experiments) and + *how* to optimize (algorithms). + + +++ + :doc:`Swap algorithms without changing experiment code ` + + .. grid-item-card:: + :class-card: feature-card + + **3 Optimizer Backends** + ^^^ + Native GFO algorithms, Optuna samplers, and scikit-learn + search methods through one unified API. + + +++ + :doc:`GFO · Optuna · sklearn ` + + .. grid-item-card:: + :class-card: feature-card + + **Mixed Parameter Spaces** + ^^^ + Categorical, integer, and continuous parameters. + Define search spaces with NumPy arrays or lists. + + +++ + :doc:`Optuna backend supports native continuous ranges ` + + .. grid-item-card:: + :class-card: feature-card + + **Stable Since 2019** + ^^^ + 5+ years of development, comprehensive test coverage, + and active maintenance. + + +++ + :doc:`Type-annotated · Documented · Tested ` + +---- + +Optimization Backends +================== + +Hyperactive provides a unified interface to three powerful optimization backends. +Choose the one that best fits your needs, or switch between them effortlessly. + +.. grid:: 1 1 3 3 + :gutter: 4 + + .. grid-item-card:: + :class-card: backend-card backend-card-gfo + + `Gradient-Free-Optimizers `__ + ^^^ + The native backend with 20 optimization algorithms implemented from scratch. + Ideal for custom objective functions and research applications. + + - Hill Climbing variants + - Simulated Annealing + - Particle Swarm & Genetic Algorithms + - Bayesian Optimization + - And 15+ more algorithms + + +++ + :doc:`Explore GFO algorithms ` + + .. grid-item-card:: + :class-card: backend-card backend-card-optuna + + `Optuna `__ + ^^^ + Industry-standard hyperparameter optimization framework with + state-of-the-art samplers and pruning strategies. + + - Tree-Parzen Estimator (TPE) + - CMA-ES for continuous spaces + - Gaussian Process optimization + - Multi-objective (NSGA-II/III) + - Native continuous parameter support + + +++ + :doc:`Explore Optuna samplers ` + + .. grid-item-card:: + :class-card: backend-card backend-card-sklearn + + `scikit-learn `__ + ^^^ + Familiar scikit-learn search interfaces with enhanced integration + for cross-validation experiments. 
+ + - GridSearchCV + - RandomizedSearchCV + - HalvingGridSearchCV + - HalvingRandomSearchCV + - Drop-in sklearn compatibility + + +++ + :doc:`Explore sklearn integration ` + +---- + +Integrations +============ + +Hyperactive works seamlessly with popular machine learning frameworks. + +.. raw:: html + + + +---- + +Quick Install +============= + +.. raw:: html + +
+   <!-- Install commands -->
+   <pre>$ pip install hyperactive</pre>
+   <pre>$ pip install hyperactive[all_extras]</pre>
+   <pre>$ pip install hyperactive[sklearn-integration]</pre>
+   <pre>$ pip install hyperactive[sktime-integration]</pre>
+ +---- + +Quick Example +============= + +Get started in just a few lines of code: + +.. raw:: html + +
+ +
+
+ +.. literalinclude:: _snippets/getting_started/index_custom_function.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + +.. raw:: html + +
+
+ +.. literalinclude:: _snippets/getting_started/index_sklearn_tuning.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + +.. raw:: html + +
+
+ +.. literalinclude:: _snippets/getting_started/index_bayesian.py + :language: python + :start-after: # [start:full_example] + :end-before: # [end:full_example] + +.. raw:: html + +
+
+
+ + + +---- + +Contents +======== + +.. toctree:: + :maxdepth: 1 + :hidden: + + get_started + installation + user_guide + api_reference + examples + faq + troubleshooting + get_involved + about + +.. raw:: html + + diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 00000000..75554816 --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,205 @@ +.. _installation: + +============ +Installation +============ + +Hyperactive can be installed via pip and supports Python |python_version_range|. + +Installing Hyperactive +---------------------- + +Basic Installation +^^^^^^^^^^^^^^^^^^ + +Install Hyperactive from PyPI using pip: + +.. code-block:: bash + + pip install hyperactive + +This installs Hyperactive with its core dependencies, which is sufficient for most use cases +including scikit-learn integration. + + +Installation with Extras +^^^^^^^^^^^^^^^^^^^^^^^^ + +For additional functionality, you can install optional extras: + +.. code-block:: bash + + # Full installation with all extras (Optuna, PyTorch Lightning, etc.) + pip install hyperactive[all_extras] + + # Scikit-learn integration (included by default) + pip install hyperactive[sklearn-integration] + + # Sktime/skpro integration for time series + pip install hyperactive[sktime-integration] + + +Development Installation +^^^^^^^^^^^^^^^^^^^^^^^^ + +To install Hyperactive for development (from source): + +.. code-block:: bash + + # Clone the repository + git clone https://github.com/SimonBlanke/Hyperactive.git + cd Hyperactive + + # Install in development mode with test dependencies + pip install -e ".[test]" + + # Or install with all development dependencies + pip install -e ".[test,docs]" + + +Dependencies +------------ + +Core Dependencies +^^^^^^^^^^^^^^^^^ + +Hyperactive requires the following packages (automatically installed): + +.. list-table:: + :header-rows: 1 + :widths: 30 70 + + * - Package + - Purpose + * - ``numpy >= 1.18.1`` + - Numerical operations and array handling + * - ``pandas < 3.0.0`` + - Data manipulation and results handling + * - ``tqdm >= 4.48.0`` + - Progress bars during optimization + * - ``gradient-free-optimizers >= 1.2.4`` + - Core optimization algorithms + * - ``scikit-base < 1.0.0`` + - Base classes for sklearn-like interfaces + * - ``scikit-learn < 1.8.0`` + - Machine learning integration + + +Optional Dependencies +--------------------- + +Depending on your use case, you may want to install additional packages: + +Optuna Backend +^^^^^^^^^^^^^^ + +For Optuna-based optimizers (TPE, CMA-ES, NSGA-II, etc.): + +.. code-block:: bash + + pip install optuna + +Or include it via the ``all_extras`` option: + +.. code-block:: bash + + pip install hyperactive[all_extras] + + +Time Series (sktime/skpro) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For time series forecasting and probabilistic prediction: + +.. code-block:: bash + + pip install hyperactive[sktime-integration] + +This installs ``sktime`` and ``skpro`` for time series optimization experiments. + +.. note:: + + Sktime integration requires Python < 3.14 due to sktime's current compatibility. + + +PyTorch Lightning +^^^^^^^^^^^^^^^^^ + +For deep learning hyperparameter optimization: + +.. code-block:: bash + + pip install hyperactive[all_extras] + # or + pip install lightning + +.. note:: + + PyTorch/Lightning requires Python < 3.14 for full compatibility. + + +Verifying Installation +---------------------- + +After installation, verify that Hyperactive is working correctly: + +.. 
literalinclude:: _snippets/installation/verify_installation.py + :language: python + :start-after: # [start:verify_installation] + :end-before: # [end:verify_installation] + + +Python Version Support +---------------------- + +Hyperactive officially supports Python |python_versions_list|. + +.. note:: + + The supported Python versions are automatically extracted from the project's + ``pyproject.toml`` classifiers. + + Some optional integrations (sktime, PyTorch) may have more restrictive + Python version requirements. Check the specific package documentation + for details. + + +Troubleshooting +--------------- + +Common Installation Issues +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +**ImportError: No module named 'gradient_free_optimizers'** + +This usually means the installation was incomplete. Try: + +.. code-block:: bash + + pip install --upgrade hyperactive + +**MemoryError during optimization** + +Sequential model-based optimizers (Bayesian, TPE) can use significant memory +for large search spaces. Reduce your search space size or use a simpler optimizer +like ``RandomSearch`` or ``HillClimbing``. + +**Pickle errors with multiprocessing** + +Ensure all objects in your search space are serializable (no lambdas, closures, +or bound methods). Use top-level functions and basic Python types. + +For more help, see the `GitHub Issues `_. + + +Using Older Versions +-------------------- + +If you need to use Hyperactive v4, you can install a specific version: + +.. code-block:: bash + + pip install hyperactive==4.8.0 + +Documentation for v4 is available at: +`Legacy Documentation (v4) `_ diff --git a/docs/source/troubleshooting.rst b/docs/source/troubleshooting.rst new file mode 100644 index 00000000..e265a0af --- /dev/null +++ b/docs/source/troubleshooting.rst @@ -0,0 +1,39 @@ +.. _troubleshooting_guide: + +=============== +Troubleshooting +=============== + +This guide helps you diagnose and fix common issues with Hyperactive. + +.. toctree:: + :maxdepth: 1 + + troubleshooting/installation + troubleshooting/runtime_errors + troubleshooting/performance + troubleshooting/results + troubleshooting/experiments + troubleshooting/getting_help + + +Overview +-------- + +:ref:`troubleshooting_installation` + Import errors, missing modules, and dependency issues. + +:ref:`troubleshooting_runtime` + AttributeError, TypeError, and ValueError during execution. + +:ref:`troubleshooting_performance` + Slow optimization and memory errors. + +:ref:`troubleshooting_results` + Inconsistent results, local optima, and unexpected scores. + +:ref:`troubleshooting_experiments` + Issues with sklearn, PyTorch Lightning, and other integrations. + +:ref:`troubleshooting_help` + Where to get additional support. diff --git a/docs/source/troubleshooting/experiments.rst b/docs/source/troubleshooting/experiments.rst new file mode 100644 index 00000000..0dd60150 --- /dev/null +++ b/docs/source/troubleshooting/experiments.rst @@ -0,0 +1,53 @@ +.. _troubleshooting_experiments: + +=========================== +Experiment-Specific Issues +=========================== + +SklearnCvExperiment Not Finding Best Parameters +------------------------------------------------ + +**Cause**: Search space doesn't include good values or not enough iterations. + +**Solutions**: + +1. Verify search space includes reasonable values: + + .. code-block:: python + + # Make sure these are sensible for your model + search_space = { + "n_estimators": [10, 50, 100, 200, 500], + "max_depth": [None, 3, 5, 10, 20], + } + +2. 
Increase iterations or use a smarter optimizer:
+
+   .. code-block:: python
+
+      optimizer = BayesianOptimizer(
+          search_space=space,
+          n_iter=200,  # More iterations
+          experiment=experiment,
+      )
+
+
+PyTorch Lightning Metric Not Found
+----------------------------------
+
+**Cause**: The metric name doesn't match what's logged during training.
+
+**Solution**: Check that your Lightning module logs the correct metric:
+
+.. code-block:: python
+
+    class MyModel(L.LightningModule):
+        def validation_step(self, batch, batch_idx):
+            loss = self.compute_loss(batch)
+            self.log("val_loss", loss)  # Must match objective_metric
+
+    experiment = TorchExperiment(
+        lightning_module=MyModel,
+        objective_metric="val_loss",  # Must match self.log name
+        ...
+    )
diff --git a/docs/source/troubleshooting/getting_help.rst b/docs/source/troubleshooting/getting_help.rst
new file mode 100644
index 00000000..c594961a
--- /dev/null
+++ b/docs/source/troubleshooting/getting_help.rst
@@ -0,0 +1,20 @@
+.. _troubleshooting_help:
+
+=================
+Getting More Help
+=================
+
+If your issue isn't covered in this troubleshooting guide:
+
+1. **Check the FAQ** — See :ref:`faq` for common questions
+
+2. **Search existing issues** — Browse `GitHub Issues `_
+
+3. **Open a new issue** — Include:
+
+   - Your code (minimal reproducible example)
+   - Full error traceback
+   - Hyperactive version (``hyperactive.__version__``)
+   - Python version
+
+4. **Join the community** — Ask on `Discord `_
diff --git a/docs/source/troubleshooting/installation.rst b/docs/source/troubleshooting/installation.rst
new file mode 100644
index 00000000..ced1c9d8
--- /dev/null
+++ b/docs/source/troubleshooting/installation.rst
@@ -0,0 +1,61 @@
+.. _troubleshooting_installation:
+
+===================
+Installation Issues
+===================
+
+ImportError: No module named 'hyperactive'
+------------------------------------------
+
+**Cause**: Hyperactive is not installed or installed in a different environment.
+
+**Solution**:
+
+.. code-block:: bash
+
+    pip install hyperactive
+
+    # Or with extras
+    pip install hyperactive[all_extras]
+
+Verify installation:
+
+.. code-block:: bash
+
+    python -c "import hyperactive; print(hyperactive.__version__)"
+
+
+ImportError: cannot import name 'Hyperactive'
+---------------------------------------------
+
+**Cause**: You're using v4 code with Hyperactive v5. The ``Hyperactive`` class
+was removed in v5.
+
+**Solution**: Update your imports. See :ref:`user_guide_migration` for details.
+
+.. code-block:: python
+
+    # Old (v4)
+    from hyperactive import Hyperactive
+
+    # New (v5)
+    from hyperactive.opt.gfo import HillClimbing
+
+
+Missing Optional Dependencies
+-----------------------------
+
+**Cause**: Some features require additional packages.
+
+**Solution**: Install the missing package or the appropriate extras:
+
+.. code-block:: bash
+
+    # For scikit-learn integration
+    pip install hyperactive[sklearn-integration]
+
+    # For the Optuna backend
+    pip install optuna
+
+    # For all extras
+    pip install hyperactive[all_extras]
diff --git a/docs/source/troubleshooting/performance.rst b/docs/source/troubleshooting/performance.rst
new file mode 100644
index 00000000..84298d0a
--- /dev/null
+++ b/docs/source/troubleshooting/performance.rst
@@ -0,0 +1,69 @@
+.. _troubleshooting_performance:
+
+==================
+Performance Issues
+==================
+
+Optimization is Very Slow
+-------------------------
+
+**Possible causes and solutions**:
+
+1. 
**Slow objective function** + + The optimizer can only be as fast as your objective. Consider: + + - Reducing cross-validation folds (``cv=3`` instead of ``cv=10``) + - Using a subset of data during tuning + - Using a simpler model for initial exploration + +2. **Too many iterations** + + Start with fewer iterations: + + .. code-block:: python + + optimizer = HillClimbing( + search_space=space, + n_iter=50, # Start small + experiment=objective, + ) + +3. **Overly large search space** + + Reduce granularity or the number of parameters: + + .. code-block:: python + + # Instead of 1000 values + "learning_rate": np.linspace(0.001, 0.1, 1000) + + # Use 20-50 values + "learning_rate": np.logspace(-3, -1, 20) + + +Memory Errors +------------- + +**Cause**: Very large search spaces can cause memory issues with some optimizers, +especially those that cache all combinations. + +**Solution**: + +1. Reduce search space size +2. Use sampling-based optimizers (``RandomSearch``, ``BayesianOptimizer``) +3. Use coarser parameter granularity + +.. code-block:: python + + # High memory usage + search_space = { + "a": np.linspace(0, 1, 10000), + "b": np.linspace(0, 1, 10000), + } # 100 million combinations! + + # Lower memory usage + search_space = { + "a": np.linspace(0, 1, 100), + "b": np.linspace(0, 1, 100), + } # 10,000 combinations diff --git a/docs/source/troubleshooting/results.rst b/docs/source/troubleshooting/results.rst new file mode 100644 index 00000000..bb6ef9d8 --- /dev/null +++ b/docs/source/troubleshooting/results.rst @@ -0,0 +1,78 @@ +.. _troubleshooting_results: + +==================== +Optimization Results +==================== + +Results Vary Between Runs +------------------------- + +**Cause**: Optimization algorithms are stochastic. + +**Solution**: Set a random seed for reproducibility: + +.. code-block:: python + + optimizer = HillClimbing( + search_space=space, + n_iter=100, + experiment=objective, + random_state=42, + ) + + +Optimizer Gets Stuck in Local Optima +------------------------------------ + +**Cause**: Local search algorithms (like HillClimbing) can get trapped. + +**Solutions**: + +1. Use a global search algorithm: + + .. code-block:: python + + from hyperactive.opt.gfo import RandomSearch, BayesianOptimizer + +2. Use population-based methods: + + .. code-block:: python + + from hyperactive.opt.gfo import ParticleSwarmOptimizer, GeneticAlgorithm + +3. Increase exploration in local search: + + .. code-block:: python + + optimizer = HillClimbing( + search_space=space, + n_iter=100, + experiment=objective, + epsilon=0.2, # Larger steps + ) + + +Best Score is Very Low or Negative +---------------------------------- + +**Check these**: + +1. **Objective function errors** — Make sure your objective doesn't crash: + + .. code-block:: python + + def objective(params): + try: + score = evaluate(params) + return score + except Exception as e: + print(f"Error: {e}") # Debug + return -np.inf + +2. **Sign convention** — Hyperactive maximizes. Negate if minimizing: + + .. code-block:: python + + def objective(params): + error = compute_error(params) + return -error # Negate for minimization diff --git a/docs/source/troubleshooting/runtime_errors.rst b/docs/source/troubleshooting/runtime_errors.rst new file mode 100644 index 00000000..a0b29a05 --- /dev/null +++ b/docs/source/troubleshooting/runtime_errors.rst @@ -0,0 +1,60 @@ +.. 
_troubleshooting_runtime: + +============== +Runtime Errors +============== + +AttributeError: 'X' object has no attribute 'run' +------------------------------------------------- + +**Cause**: Using v4 method names with v5 optimizers. + +**Solution**: Use ``.solve()`` instead of ``.run()``: + +.. code-block:: python + + # Old (v4) + hyper.run() + + # New (v5) + best_params = optimizer.solve() + + +TypeError: unexpected keyword argument +-------------------------------------- + +**Cause**: Parameter passing changed in v5. All configuration now goes +to the optimizer constructor. + +**Solution**: + +.. code-block:: python + + # Old (v4) + hyper.add_search(model, space, optimizer=opt, n_iter=100) + + # New (v5) + optimizer = HillClimbing( + search_space=space, + n_iter=100, + experiment=objective, + ) + + +ValueError: Parameters do not match +----------------------------------- + +**Cause**: Your search space keys don't match what the experiment expects. + +**Solution**: Ensure search space keys match the parameters your objective +function or experiment expects: + +.. code-block:: python + + # Search space defines "learning_rate" + search_space = {"learning_rate": [0.01, 0.1]} + + # Objective must use the same key + def objective(params): + lr = params["learning_rate"] # Not "lr" or "LearningRate" + ... diff --git a/docs/source/user_guide.rst b/docs/source/user_guide.rst new file mode 100644 index 00000000..6d6f7e23 --- /dev/null +++ b/docs/source/user_guide.rst @@ -0,0 +1,99 @@ +.. _user_guide: + +========== +User Guide +========== + +This guide covers Hyperactive's core concepts and features in depth. +Whether you're new to hyperparameter optimization or an experienced practitioner, +you'll find detailed explanations and practical examples here. + +.. note:: + + Some code snippets in this guide are **illustrative** and may contain + placeholders (like ``score`` or ``SomeOptimizer``). For complete, runnable + examples, see the :ref:`examples` or :ref:`get_started` sections. + +.. toctree:: + :maxdepth: 1 + + user_guide/introduction + user_guide/search_spaces + user_guide/optimizers + user_guide/experiments + user_guide/integrations + user_guide/migration + + +Overview +-------- + +Hyperactive v5 introduces a clean **experiment-based architecture** that separates +optimization algorithms from optimization problems: + +- **Experiments** define *what* to optimize — the objective function and evaluation logic +- **Optimizers** define *how* to optimize — the search strategy and algorithm + +This design allows you to: + +- Mix and match any optimizer with any experiment type +- Create reusable experiment definitions for common ML tasks +- Easily switch between different optimization strategies +- Build complex optimization workflows with consistent interfaces + +Basic Workflow +^^^^^^^^^^^^^^ + +Every Hyperactive optimization follows this pattern: + +.. code-block:: python + + from hyperactive.opt.gfo import HillClimbing + + # 1. Define the experiment (what to optimize) + def objective(params): + return score # Hyperactive maximizes this + + # 2. Define the search space + search_space = { + "param1": [value1, value2, ...], + "param2": [value1, value2, ...], + } + + # 3. Choose an optimizer (how to optimize) + optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, + ) + + # 4. Run the optimization + best_params = optimizer.solve() + + +Guide Contents +-------------- + +:ref:`user_guide_introduction` + Core concepts: optimizers, experiments, and search spaces. 
+ Start here to understand Hyperactive's architecture. + +:ref:`user_guide_search_spaces` + Best practices for designing search spaces. + Covers scaling, granularity, and common patterns. + +:ref:`user_guide_optimizers` + Detailed guide to choosing and configuring optimizers. + Covers local search, global search, population methods, and Bayesian approaches. + +:ref:`user_guide_experiments` + How to define optimization problems using experiments. + Includes custom functions and built-in ML experiments. + +:ref:`user_guide_integrations` + Framework integrations for scikit-learn, sktime, skpro, and PyTorch. + Drop-in replacements for GridSearchCV and similar tools. + +:ref:`user_guide_migration` + Migration guide for upgrading from Hyperactive v4 to v5. + Covers API changes, new patterns, and troubleshooting. diff --git a/docs/source/user_guide/experiments.rst b/docs/source/user_guide/experiments.rst new file mode 100644 index 00000000..7ee388ef --- /dev/null +++ b/docs/source/user_guide/experiments.rst @@ -0,0 +1,137 @@ +.. _user_guide_experiments: + +=========== +Experiments +=========== + +Experiments define *what* to optimize in Hyperactive. They encapsulate the objective +function and any evaluation logic needed to score a set of parameters. + +Defining Experiments +-------------------- + +There are two ways to define experiments in Hyperactive: + +1. **Custom functions** — Simple callables for any optimization problem +2. **Built-in experiment classes** — Pre-built experiments for common ML tasks + + +Custom Objective Functions +-------------------------- + +The simplest way to define an experiment is as a Python function that takes +a parameter dictionary and returns a score: + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:simple_objective] + :end-before: # [end:simple_objective] + +Key points: + +- The function receives a dictionary with parameter names as keys +- It must return a single numeric value (the score) +- Hyperactive **maximizes** this score by default +- To minimize, negate your loss function (as shown above) + + +Example: Optimizing a Mathematical Function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:ackley_function] + :end-before: # [end:ackley_function] + + +Example: Optimizing with External Resources +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Your objective function can use any Python code: + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:external_simulation] + :end-before: # [end:external_simulation] + + +Built-in Experiment Classes +--------------------------- + +For common machine learning tasks, Hyperactive provides ready-to-use experiment classes +that handle cross-validation, scoring, and other details. + + +SklearnCvExperiment +^^^^^^^^^^^^^^^^^^^ + +For optimizing scikit-learn estimators with cross-validation: + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:sklearn_cv_experiment] + :end-before: # [end:sklearn_cv_experiment] + + +SktimeForecastingExperiment +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For time series forecasting optimization (requires ``sktime``): + +.. 
literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:sktime_forecasting] + :end-before: # [end:sktime_forecasting] + + +TorchExperiment +^^^^^^^^^^^^^^^ + +For PyTorch Lightning model optimization (requires ``lightning``): + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:torch_experiment] + :end-before: # [end:torch_experiment] + + +Benchmark Experiments +--------------------- + +Hyperactive includes standard benchmark functions for testing optimizers: + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:benchmark_experiments] + :end-before: # [end:benchmark_experiments] + + +Using the score() Method +------------------------ + +Experiments can also be evaluated directly using the ``score()`` method: + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:score_method] + :end-before: # [end:score_method] + + +Tips for Designing Experiments +------------------------------ + +1. **Return meaningful scores**: Ensure your score reflects what you want to optimize. + Higher is better (Hyperactive maximizes). + +2. **Handle errors gracefully**: If a parameter combination fails, return a very + low score (e.g., ``-np.inf``) rather than raising an exception. + +3. **Consider computation time**: For expensive experiments, use efficient optimizers + like ``BayesianOptimizer`` that learn from previous evaluations. + +4. **Use reproducibility**: Set random seeds in your experiment for consistent results. + +.. literalinclude:: ../_snippets/user_guide/experiments.py + :language: python + :start-after: # [start:robust_objective] + :end-before: # [end:robust_objective] diff --git a/docs/source/user_guide/integrations.rst b/docs/source/user_guide/integrations.rst new file mode 100644 index 00000000..d45e5da3 --- /dev/null +++ b/docs/source/user_guide/integrations.rst @@ -0,0 +1,147 @@ +.. _user_guide_integrations: + +====================== +Framework Integrations +====================== + +Hyperactive provides seamless integrations with popular machine learning frameworks. +These integrations offer drop-in replacements for tools like ``GridSearchCV``, +making it easy to use any Hyperactive optimizer with your existing code. + + +Scikit-Learn Integration +------------------------ + +The ``OptCV`` class provides a scikit-learn compatible interface for hyperparameter +tuning. It works like ``GridSearchCV`` but supports any Hyperactive optimizer. + +Basic Usage +^^^^^^^^^^^ + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:optcv_basic] + :end-before: # [end:optcv_basic] + + +Using Different Optimizers +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Any Hyperactive optimizer works with ``OptCV``: + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:different_optimizers] + :end-before: # [end:different_optimizers] + + +Pipeline Integration +^^^^^^^^^^^^^^^^^^^^ + +``OptCV`` works with sklearn pipelines: + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:pipeline_integration] + :end-before: # [end:pipeline_integration] + + +Time Series with Sktime +----------------------- + +Hyperactive integrates with ``sktime`` for time series forecasting optimization. + +.. 
note:: + + Requires ``pip install hyperactive[sktime-integration]`` + + +Forecasting Optimization +^^^^^^^^^^^^^^^^^^^^^^^^ + +Use ``ForecastingOptCV`` to tune forecasters: + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:forecasting_optcv] + :end-before: # [end:forecasting_optcv] + + +Time Series Classification +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use ``TSCOptCV`` for time series classification: + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:tsc_optcv] + :end-before: # [end:tsc_optcv] + + +Probabilistic Prediction with Skpro +----------------------------------- + +For probabilistic regression with ``skpro``: + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:skpro_experiment] + :end-before: # [end:skpro_experiment] + + +PyTorch Lightning Integration +----------------------------- + +For deep learning hyperparameter optimization with PyTorch Lightning: + +.. note:: + + Requires ``pip install hyperactive[all_extras]`` or ``pip install lightning`` + +.. literalinclude:: ../_snippets/user_guide/integrations.py + :language: python + :start-after: # [start:pytorch_lightning] + :end-before: # [end:pytorch_lightning] + + +Choosing the Right Integration +------------------------------ + +.. list-table:: + :header-rows: 1 + :widths: 25 25 50 + + * - Framework + - Integration Class + - Use Case + * - scikit-learn + - ``OptCV`` + - Classification, regression, pipelines + * - sktime + - ``ForecastingOptCV`` + - Time series forecasting + * - sktime + - ``TSCOptCV`` + - Time series classification + * - skpro + - ``SkproProbaRegExperiment`` + - Probabilistic regression + * - PyTorch Lightning + - ``TorchExperiment`` + - Deep learning models + + +Tips for Using Integrations +--------------------------- + +1. **Match the interface**: Use ``OptCV`` when you want sklearn-compatible behavior + (fit/predict). Use experiment classes when you want more control. + +2. **Consider evaluation cost**: Deep learning experiments are expensive. + Use efficient optimizers like ``BayesianOptimizer`` with fewer iterations. + +3. **Use appropriate CV strategies**: Match your cross-validation to your problem + (e.g., ``TimeSeriesSplit`` for time series, stratified splits for imbalanced data). + +4. **Start simple**: Begin with ``GridSearch`` or ``RandomSearch`` to establish + baselines before using more sophisticated optimizers. diff --git a/docs/source/user_guide/introduction.rst b/docs/source/user_guide/introduction.rst new file mode 100644 index 00000000..e000942a --- /dev/null +++ b/docs/source/user_guide/introduction.rst @@ -0,0 +1,179 @@ +.. _user_guide_introduction: + +============ +Introduction +============ + +This page introduces Hyperactive's core concepts: optimizers, experiments, and search spaces. +Understanding these concepts will help you use Hyperactive effectively for any optimization task. + + +Core Concepts +------------- + +Hyperactive is built around three key concepts: + +1. **Experiments** — Define *what* to optimize (the objective function) +2. **Optimizers** — Define *how* to optimize (the search algorithm) +3. **Search Spaces** — Define *where* to search (the parameter ranges) + + +Experiments: What to Optimize +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An **experiment** represents your optimization problem. It takes parameters as input +and returns a score that Hyperactive will maximize. 
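+
+Because Hyperactive maximizes the score, minimizing a loss simply means
+returning its negative. A minimal sketch of this contract, with a
+hypothetical parameter ``x``:
+
+.. code-block:: python
+
+    def experiment(params):
+        # params maps parameter names to the sampled values
+        loss = (params["x"] - 3) ** 2
+        return -loss  # negated: maximizing the score minimizes the loss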
+ +The simplest experiment is a Python function: + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:simple_objective] + :end-before: # [end:simple_objective] + +For machine learning, Hyperactive provides built-in experiments: + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:sklearn_experiment_intro] + :end-before: # [end:sklearn_experiment_intro] + +See :ref:`user_guide_experiments` for more details. + + +Optimizers: How to Optimize +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An **optimizer** is the algorithm that explores the search space to find the best parameters. +Hyperactive provides 20+ optimizers in different categories: + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:optimizer_imports] + :end-before: # [end:optimizer_imports] + +Each optimizer has different characteristics: + +- **Local search** (HillClimbing, SimulatedAnnealing): Fast, may get stuck in local optima +- **Global search** (RandomSearch, GridSearch): Thorough exploration, slower +- **Population methods** (GeneticAlgorithm, ParticleSwarm): Good for complex landscapes +- **Sequential methods** (BayesianOptimizer, TPE): Smart exploration, best for expensive evaluations + +See :ref:`user_guide_optimizers` for a complete guide. + + +Search Spaces: Where to Search +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A **search space** defines the possible values for each parameter. +Use dictionaries with lists or NumPy arrays: + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:search_space_definition] + :end-before: # [end:search_space_definition] + +.. tip:: + + Keep search spaces reasonably sized. Very large spaces (>10^8 combinations) + can cause memory issues with some optimizers. + + +Basic Workflow +-------------- + +Here's the complete workflow for using Hyperactive: + +Step 1: Define Your Experiment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Either as a function or using built-in experiment classes: + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:workflow_experiment_options] + :end-before: # [end:workflow_experiment_options] + + +Step 2: Define the Search Space +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:workflow_search_space] + :end-before: # [end:workflow_search_space] + + +Step 3: Choose an Optimizer +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:workflow_optimizer] + :end-before: # [end:workflow_optimizer] + + +Step 4: Run the Optimization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:workflow_solve] + :end-before: # [end:workflow_solve] + + +Common Optimizer Parameters +--------------------------- + +Most optimizers share these parameters: + +.. 
list-table:: + :header-rows: 1 + :widths: 20 15 65 + + * - Parameter + - Type + - Description + * - ``search_space`` + - dict + - Maps parameter names to possible values + * - ``n_iter`` + - int + - Number of optimization iterations + * - ``experiment`` + - callable + - The objective function or experiment object + * - ``random_state`` + - int + - Seed for reproducibility + * - ``initialize`` + - dict + - Control initial population (warm starts, etc.) + + +Warm Starting +^^^^^^^^^^^^^ + +You can provide starting points for optimization: + +.. literalinclude:: ../_snippets/user_guide/introduction.py + :language: python + :start-after: # [start:warm_starting] + :end-before: # [end:warm_starting] + + +Tips for Effective Optimization +------------------------------- + +1. **Start simple**: Begin with ``HillClimbing`` or ``RandomSearch`` to establish baselines. + +2. **Right-size your search space**: Large spaces need more iterations. Consider using + ``np.logspace`` for parameters that span orders of magnitude. + +3. **Use appropriate iterations**: More iterations = better exploration, but longer runtime. + A good rule of thumb: at least 10x the number of parameters. + +4. **Set random_state**: For reproducible results, always set a random seed. + +5. **Consider your budget**: For expensive evaluations (training large models), + use smart optimizers like ``BayesianOptimizer`` that learn from previous evaluations. diff --git a/docs/source/user_guide/migration.rst b/docs/source/user_guide/migration.rst new file mode 100644 index 00000000..6a73c075 --- /dev/null +++ b/docs/source/user_guide/migration.rst @@ -0,0 +1,361 @@ +.. _user_guide_migration: + +====================== +Migration Guide (v4→v5) +====================== + +This guide helps you migrate from Hyperactive v4 to v5. Version 5 introduces +a new experiment-based architecture with a simplified API. + +.. note:: + + If you're still using Hyperactive v4 and need documentation for that version, + see the `Legacy Documentation (v4) `_. + +Quick Summary +------------- + +The main changes in v5 are: + +1. **No more ``Hyperactive`` class** — Optimizers are used directly +2. **``.run()`` replaced with ``.solve()``** — Single method for optimization +3. **New import paths** — ``hyperactive.opt.gfo`` instead of ``hyperactive.optimizers`` +4. **Experiment abstraction** — Built-in ML experiments for scikit-learn, sktime, etc. +5. **Constructor-based configuration** — All parameters passed to optimizer constructor + + +Basic Migration +--------------- + +Here's how to convert a v4 script to v5: + +**v4 (Old)** + +.. code-block:: python + + from hyperactive import Hyperactive + from hyperactive.optimizers import HillClimbingOptimizer + + def model(opt): + # Use opt["param"] to access parameters + score = -(opt["x"] ** 2) + return score + + search_space = { + "x": list(range(-10, 10)), + } + + # Create optimizer separately + optimizer = HillClimbingOptimizer(epsilon=0.1) + + # Create Hyperactive instance and add search + hyper = Hyperactive() + hyper.add_search( + model, + search_space, + optimizer=optimizer, + n_iter=100, + ) + hyper.run() + + # Access results + best_params = hyper.best_para(model) + best_score = hyper.best_score(model) + +**v5 (New)** + +.. 
code-block:: python + + from hyperactive.opt.gfo import HillClimbing + + def objective(params): + # Use params["param"] to access parameters + score = -(params["x"] ** 2) + return score + + search_space = { + "x": list(range(-10, 10)), + } + + # Configure optimizer directly + optimizer = HillClimbing( + search_space=search_space, + n_iter=100, + experiment=objective, + epsilon=0.1, + ) + + # Run optimization + best_params = optimizer.solve() + + # Access results + best_score = optimizer.best_score_ + + +Import Path Changes +------------------- + +The optimizer imports have changed: + +.. list-table:: + :header-rows: 1 + :widths: 50 50 + + * - v4 Import + - v5 Import + * - ``from hyperactive import Hyperactive`` + - Not needed + * - ``from hyperactive.optimizers import HillClimbingOptimizer`` + - ``from hyperactive.opt.gfo import HillClimbing`` + * - ``from hyperactive.optimizers import RandomSearchOptimizer`` + - ``from hyperactive.opt.gfo import RandomSearch`` + * - ``from hyperactive.optimizers import BayesianOptimizer`` + - ``from hyperactive.opt.gfo import BayesianOptimizer`` + +.. tip:: + + In v5, you can also import optimizers directly from ``hyperactive.opt``: + + .. code-block:: python + + from hyperactive.opt import HillClimbing, RandomSearch, BayesianOptimizer + + +Optimizer Name Changes +---------------------- + +Some optimizer class names have changed: + +.. list-table:: + :header-rows: 1 + :widths: 50 50 + + * - v4 Name + - v5 Name + * - ``HillClimbingOptimizer`` + - ``HillClimbing`` + * - ``RandomSearchOptimizer`` + - ``RandomSearch`` + * - ``ParticleSwarmOptimizer`` + - ``ParticleSwarmOptimizer`` (unchanged) + * - ``BayesianOptimizer`` + - ``BayesianOptimizer`` (unchanged) + * - ``TreeStructuredParzenEstimators`` + - ``TreeStructuredParzenEstimators`` (unchanged) + + +Method Changes +-------------- + +.. list-table:: + :header-rows: 1 + :widths: 30 30 40 + + * - Action + - v4 + - v5 + * - Run optimization + - ``hyper.run()`` + - ``optimizer.solve()`` + * - Get best params + - ``hyper.best_para(model)`` + - ``optimizer.best_params_`` + * - Get best score + - ``hyper.best_score(model)`` + - ``optimizer.best_score_`` + + +Scikit-learn Integration +------------------------ + +v5 introduces experiment classes for cleaner ML integration. + +**v4 (Old)** + +.. code-block:: python + + from sklearn.datasets import load_iris + from sklearn.ensemble import RandomForestClassifier + from sklearn.model_selection import cross_val_score + from hyperactive import Hyperactive + from hyperactive.optimizers import HillClimbingOptimizer + + X, y = load_iris(return_X_y=True) + + def model(opt): + clf = RandomForestClassifier( + n_estimators=opt["n_estimators"], + max_depth=opt["max_depth"], + ) + return cross_val_score(clf, X, y, cv=3).mean() + + search_space = { + "n_estimators": [10, 50, 100], + "max_depth": [3, 5, 10], + } + + hyper = Hyperactive() + hyper.add_search(model, search_space, n_iter=50) + hyper.run() + +**v5 (New) — Using SklearnCvExperiment** + +.. 
code-block:: python + + from sklearn.datasets import load_iris + from sklearn.ensemble import RandomForestClassifier + from hyperactive.experiment.integrations import SklearnCvExperiment + from hyperactive.opt.gfo import HillClimbing + + X, y = load_iris(return_X_y=True) + + experiment = SklearnCvExperiment( + estimator=RandomForestClassifier(), + X=X, y=y, cv=3, + ) + + search_space = { + "n_estimators": [10, 50, 100], + "max_depth": [3, 5, 10], + } + + optimizer = HillClimbing( + search_space=search_space, + n_iter=50, + experiment=experiment, + ) + best_params = optimizer.solve() + +**v5 (Alternative) — Using custom function** + +The v4-style custom function still works in v5: + +.. code-block:: python + + from sklearn.datasets import load_iris + from sklearn.ensemble import RandomForestClassifier + from sklearn.model_selection import cross_val_score + from hyperactive.opt.gfo import HillClimbing + + X, y = load_iris(return_X_y=True) + + def objective(params): + clf = RandomForestClassifier( + n_estimators=params["n_estimators"], + max_depth=params["max_depth"], + ) + return cross_val_score(clf, X, y, cv=3).mean() + + search_space = { + "n_estimators": [10, 50, 100], + "max_depth": [3, 5, 10], + } + + optimizer = HillClimbing( + search_space=search_space, + n_iter=50, + experiment=objective, + ) + best_params = optimizer.solve() + + +New Features in v5 +------------------ + +v5 introduces several new features: + +**Experiment Classes** + +Pre-built experiments for common ML tasks: + +- ``SklearnCvExperiment`` — scikit-learn cross-validation +- ``SktimeForecastingExperiment`` — sktime forecasting +- ``SktimeClassificationExperiment`` — sktime time series classification +- ``SkproProbaRegExperiment`` — skpro probabilistic regression +- ``TorchExperiment`` — PyTorch Lightning models + +**Optuna Backend** + +Access Optuna optimizers through Hyperactive: + +.. code-block:: python + + from hyperactive.opt.optuna import TPEOptimizer, CmaEsOptimizer + +**sklearn-Compatible Interface** + +Drop-in replacement for ``GridSearchCV``: + +.. code-block:: python + + from hyperactive.integrations.sklearn import OptCV + + search = OptCV(estimator=clf, param_space=space, n_iter=50) + search.fit(X, y) + + +Removed Features +---------------- + +The following v4 features are no longer available in v5: + +- ``Hyperactive.add_search()`` — Use optimizer constructor instead +- ``Hyperactive.run()`` — Use ``optimizer.solve()`` instead +- ``search_data`` parameter — Data collection handled differently +- ``memory`` parameter — Memory features restructured +- Multiple parallel searches — Use separate optimizer instances + + +Troubleshooting Migration +------------------------- + +**ImportError: cannot import name 'Hyperactive'** + +The ``Hyperactive`` class no longer exists. Use optimizers directly: + +.. code-block:: python + + # Old + from hyperactive import Hyperactive + + # New + from hyperactive.opt.gfo import HillClimbing + +**AttributeError: 'HillClimbing' object has no attribute 'run'** + +The ``.run()`` method is now ``.solve()``: + +.. code-block:: python + + # Old + hyper.run() + + # New + best_params = optimizer.solve() + +**TypeError: unexpected keyword argument 'optimizer'** + +Optimizer parameters are now passed to the constructor: + +.. code-block:: python + + # Old + hyper.add_search(model, space, optimizer=opt, n_iter=100) + + # New + optimizer = HillClimbing( + search_space=space, + n_iter=100, + experiment=model, + ) + + +Getting Help +------------ + +If you encounter issues migrating: + +1. 
Check the :ref:`api_reference` for current API +2. See :ref:`examples` for v5 code examples +3. Open an issue on `GitHub `_ +4. Ask on `Discord `_ diff --git a/docs/source/user_guide/optimizers.rst b/docs/source/user_guide/optimizers.rst new file mode 100644 index 00000000..748b393c --- /dev/null +++ b/docs/source/user_guide/optimizers.rst @@ -0,0 +1,325 @@ +.. _user_guide_optimizers: + +========== +Optimizers +========== + +Optimizers define *how* Hyperactive explores the search space to find optimal parameters. +This guide helps you choose the right optimizer for your problem and configure it effectively. + + +Choosing an Optimizer +--------------------- + +The best optimizer depends on your problem characteristics: + +.. list-table:: + :header-rows: 1 + :widths: 25 25 50 + + * - Scenario + - Recommended Optimizers + - Why + * - Quick baseline + - ``HillClimbing``, ``RandomSearch`` + - Fast, simple, good for initial exploration + * - Expensive evaluations + - ``BayesianOptimizer``, ``TPEOptimizer`` + - Learn from past evaluations, minimize function calls + * - Large search space + - ``RandomSearch``, ``ParticleSwarmOptimizer`` + - Good global coverage + * - Multi-modal landscape + - ``GeneticAlgorithm``, ``DifferentialEvolution`` + - Population-based, avoid local optima + * - Small search space + - ``GridSearch`` + - Exhaustive coverage when feasible + + +Optimizer Categories +-------------------- + +Hyperactive organizes optimizers into categories based on their search strategies. + + +Local Search +^^^^^^^^^^^^ + +Local search optimizers explore the neighborhood of the current best solution. +They're fast but may get stuck in local optima. + +**Hill Climbing** + +The simplest local search: always move to a better neighbor. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:hill_climbing] + :end-before: # [end:hill_climbing] + +**Simulated Annealing** + +Like hill climbing, but sometimes accepts worse solutions to escape local optima. +The "temperature" controls exploration vs exploitation. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:simulated_annealing] + :end-before: # [end:simulated_annealing] + +**Repulsing Hill Climbing** + +Remembers visited regions and avoids them, encouraging broader exploration. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:repulsing_hill_climbing] + :end-before: # [end:repulsing_hill_climbing] + +**Stochastic Hill Climbing** + +Hill climbing with a probability of accepting worse solutions. The ``p_accept`` +parameter controls exploration — higher values make it more likely to accept +non-improving moves, helping escape local optima. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:stochastic_hill_climbing] + :end-before: # [end:stochastic_hill_climbing] + +**Downhill Simplex (Nelder-Mead)** + +Uses a simplex of points to navigate the search space. Good for continuous problems. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:downhill_simplex] + :end-before: # [end:downhill_simplex] + + +Global Search +^^^^^^^^^^^^^ + +Global search optimizers explore the entire search space more thoroughly. + +**Random Search** + +Samples random points from the search space. Simple but surprisingly effective baseline. + +.. 
literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:random_search] + :end-before: # [end:random_search] + +**Grid Search** + +Evaluates all combinations systematically. Only practical for small search spaces. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:grid_search] + :end-before: # [end:grid_search] + +**Random Restart Hill Climbing** + +Runs hill climbing from multiple random starting points. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:random_restart_hill_climbing] + :end-before: # [end:random_restart_hill_climbing] + +**Powell's Method** and **Pattern Search** + +Classical derivative-free optimization methods. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:powells_pattern] + :end-before: # [end:powells_pattern] + + +Population Methods +^^^^^^^^^^^^^^^^^^ + +Population-based optimizers maintain multiple candidate solutions that evolve together. +They're excellent for complex, multi-modal optimization landscapes. + +**Particle Swarm Optimization** + +Particles "fly" through the search space, influenced by their own best position +and the swarm's best position. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:particle_swarm] + :end-before: # [end:particle_swarm] + +**Genetic Algorithm** + +Evolves a population using selection, crossover, and mutation inspired by biology. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:genetic_algorithm] + :end-before: # [end:genetic_algorithm] + +**Evolution Strategy** + +Similar to genetic algorithms but focused on real-valued optimization. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:evolution_strategy] + :end-before: # [end:evolution_strategy] + +**Differential Evolution** + +Uses vector differences to guide mutation. Excellent for continuous optimization. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:differential_evolution] + :end-before: # [end:differential_evolution] + +**Parallel Tempering** + +Runs multiple chains at different "temperatures" and exchanges information between them. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:parallel_tempering] + :end-before: # [end:parallel_tempering] + +**Spiral Optimization** + +Particles spiral toward the best solution found so far. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:spiral_optimization] + :end-before: # [end:spiral_optimization] + + +Sequential Model-Based (Bayesian) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These optimizers build a model of the objective function and use it to decide +where to sample next. Best for expensive evaluations. + +**Bayesian Optimization** + +Uses Gaussian Process regression to model the objective and acquisition functions +to balance exploration and exploitation. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:bayesian_optimizer] + :end-before: # [end:bayesian_optimizer] + +**Tree-Structured Parzen Estimators (TPE)** + +Models the distribution of good and bad parameters separately. + +.. 
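code-block:: python + + # Toy sketch of the TPE idea (illustrative only, not Hyperactive's + # implementation; requires numpy and scipy): prefer candidates where the + # density of "good" observations l(x) outweighs that of "bad" ones g(x). + import numpy as np + from scipy.stats import gaussian_kde + + rng = np.random.default_rng(0) + x = rng.uniform(-5, 5, 100) + scores = -(x - 2.0) ** 2 # toy objective with optimum at x = 2 + cutoff = np.quantile(scores, 0.75) # top quartile counts as "good" + l, g = gaussian_kde(x[scores >= cutoff]), gaussian_kde(x[scores < cutoff]) + cand = rng.uniform(-5, 5, 256) + print(cand[np.argmax(l(cand) / g(cand))]) # suggestion lands near 2 + +The snippet for Hyperactive's TPE optimizer: + +.. 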
literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:tpe] + :end-before: # [end:tpe] + +**Forest Optimizer** + +Uses Random Forest to model the objective function. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:forest_optimizer] + :end-before: # [end:forest_optimizer] + +**Lipschitz Optimization** and **DIRECT Algorithm** + +Use Lipschitz continuity assumptions to guide the search. + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:lipschitz_direct] + :end-before: # [end:lipschitz_direct] + + +Optuna Backend +^^^^^^^^^^^^^^ + +Hyperactive provides wrappers for Optuna's powerful samplers: + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:optuna_imports] + :end-before: # [end:optuna_imports] + +Example with Optuna TPE: + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:optuna_tpe] + :end-before: # [end:optuna_tpe] + + +Optimizer Configuration +----------------------- + +Common Parameters +^^^^^^^^^^^^^^^^^ + +All optimizers accept these parameters: + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:common_parameters] + :end-before: # [end:common_parameters] + + +Initialization Strategies +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Control how the optimizer initializes its search: + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:warm_start_example] + :end-before: # [end:warm_start_example] + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:initialization_strategies] + :end-before: # [end:initialization_strategies] + + +Algorithm-Specific Parameters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Many optimizers have additional parameters. Check the :ref:`api_reference` for details. + +Example with Simulated Annealing: + +.. literalinclude:: ../_snippets/user_guide/optimizers.py + :language: python + :start-after: # [start:simulated_annealing_config] + :end-before: # [end:simulated_annealing_config] + + +Performance Tips +---------------- + +1. **Start with baselines**: Always run ``RandomSearch`` first to establish + a baseline and understand your objective landscape. + +2. **Match iterations to complexity**: Complex optimizers (Bayesian, population-based) + need more iterations to show their advantages. + +3. **Consider evaluation cost**: For cheap evaluations, simple optimizers work well. + For expensive ones, use model-based approaches. + +4. **Use warm starts**: If you have prior knowledge, warm starts can significantly + speed up optimization. + +5. **Set random seeds**: For reproducible results, always set ``random_state``. diff --git a/docs/source/user_guide/search_spaces.rst b/docs/source/user_guide/search_spaces.rst new file mode 100644 index 00000000..ee7fbb4a --- /dev/null +++ b/docs/source/user_guide/search_spaces.rst @@ -0,0 +1,355 @@ +.. _user_guide_search_spaces: + +=========================== +Search Space Best Practices +=========================== + +This guide covers how to design effective search spaces for hyperparameter optimization. +A well-designed search space can significantly improve optimization results and efficiency. + + +Understanding Search Spaces +--------------------------- + +A search space defines the possible values for each parameter. 
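In its simplest form, a space is a plain dictionary (an illustrative sketch): + +.. code-block:: python + + # two hypothetical parameters with their candidate values + search_space = { + "max_depth": [3, 5, 10], + "learning_rate": [0.001, 0.01, 0.1], + } + +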
Hyperactive samples +from these values during optimization. The quality of your search space directly +affects: + +- **Optimization speed**: Smaller, targeted spaces converge faster +- **Solution quality**: Including good values is essential for finding them +- **Memory usage**: Very large spaces can cause memory issues with some optimizers + + +Defining Search Spaces +---------------------- + +Basic Structure +^^^^^^^^^^^^^^^ + +Search spaces are Python dictionaries mapping parameter names to lists of possible values: + +.. code-block:: python + + search_space = { + "n_estimators": [10, 50, 100, 200], + "max_depth": [3, 5, 10, None], + "min_samples_split": [2, 5, 10], + } + + +Discrete Values +^^^^^^^^^^^^^^^ + +For parameters with a small set of distinct values: + +.. code-block:: python + + search_space = { + # Categorical choices + "kernel": ["linear", "rbf", "poly"], + + # Boolean flags + "fit_intercept": [True, False], + + # Specific integer values + "n_neighbors": [3, 5, 7, 9, 11], + } + + +Continuous Ranges +^^^^^^^^^^^^^^^^^ + +For parameters that vary continuously, use NumPy to create arrays: + +.. code-block:: python + + import numpy as np + + search_space = { + # Linear spacing for uniform ranges + "momentum": np.linspace(0.5, 0.99, 50).tolist(), + + # Log spacing for parameters spanning orders of magnitude + "learning_rate": np.logspace(-4, -1, 50).tolist(), + + # Integer range + "hidden_size": list(range(32, 257, 32)), # 32, 64, 96, ... + } + +.. tip:: + + Convert NumPy arrays to lists with ``.tolist()`` for cleaner code, + though Hyperactive accepts both formats. + + +Scale-Appropriate Spacing +------------------------- + +Linear vs Logarithmic +^^^^^^^^^^^^^^^^^^^^^ + +The spacing between values should match how the parameter affects your objective: + +**Linear spacing** — When changes have proportional effects: + +.. code-block:: python + + # Dropout rate: 0.1 → 0.2 has similar effect as 0.5 → 0.6 + "dropout": np.linspace(0.0, 0.5, 11).tolist() + +**Logarithmic spacing** — When the parameter spans orders of magnitude: + +.. code-block:: python + + # Learning rate: 0.001 → 0.01 is as significant as 0.01 → 0.1 + "learning_rate": np.logspace(-4, -1, 20).tolist() + +Common parameters that benefit from log spacing: + +- Learning rates (``1e-5`` to ``1e-1``) +- Regularization strength (``1e-6`` to ``1e1``) +- Batch sizes (powers of 2: 16, 32, 64, 128, ...) + + +Choosing Granularity +-------------------- + +The number of values per parameter affects the total search space size: + +.. code-block:: python + + # Fine granularity: 100 values per parameter + # 3 parameters → 100^3 = 1,000,000 combinations + "param_a": np.linspace(0, 1, 100).tolist(), + "param_b": np.linspace(0, 1, 100).tolist(), + "param_c": np.linspace(0, 1, 100).tolist(), + + # Coarse granularity: 10 values per parameter + # 3 parameters → 10^3 = 1,000 combinations + "param_a": np.linspace(0, 1, 10).tolist(), + "param_b": np.linspace(0, 1, 10).tolist(), + "param_c": np.linspace(0, 1, 10).tolist(), + +**Guidelines:** + +- Start coarse, refine after initial results +- Use finer granularity for sensitive parameters +- Use coarser granularity for less important parameters + + +Search Space Size Considerations +-------------------------------- + +Calculate your total search space size: + +.. 
code-block:: python + + from functools import reduce + import operator + + import numpy as np + + search_space = { + "n_estimators": [10, 50, 100, 200], # 4 values + "max_depth": [3, 5, 10, None], # 4 values + "learning_rate": np.logspace(-3, 0, 20), # 20 values + } + + total_combinations = reduce( + operator.mul, + [len(v) for v in search_space.values()] + ) + print(f"Total combinations: {total_combinations:,}") # 320 + +**Recommendations by search space size:** + +.. list-table:: + :header-rows: 1 + :widths: 25 35 40 + + * - Size + - Recommended Approach + - Optimizer Suggestions + * - <100 + - Grid search (exhaustive) + - ``GridSearch`` + * - 100–10,000 + - Random or local search + - ``RandomSearch``, ``HillClimbing`` + * - 10,000–1,000,000 + - Smart sampling required + - ``BayesianOptimizer``, ``TPE`` + * - >1,000,000 + - Reduce search space or use population methods + - ``ParticleSwarmOptimizer``, ``EvolutionStrategy`` + + +Handling Parameter Dependencies +------------------------------- + +Sometimes parameters have dependencies. Handle these in your objective function: + +.. code-block:: python + + def objective(params): + # Constraint: min_samples_split >= min_samples_leaf + if params["min_samples_split"] < params["min_samples_leaf"]: + return -np.inf # Invalid configuration + + # Constraint: kernel-specific parameters + if params["kernel"] != "poly" and params["degree"] != 3: + return -np.inf # degree only relevant for poly kernel + + # Valid configuration — proceed with evaluation + return evaluate_model(params) + +.. note:: + + Returning ``-np.inf`` effectively removes invalid combinations from consideration. + + +Common Search Space Patterns +---------------------------- + +Scikit-learn Classifiers +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + # Random Forest + rf_space = { + "n_estimators": [50, 100, 200, 500], + "max_depth": [None, 5, 10, 20, 30], + "min_samples_split": [2, 5, 10], + "min_samples_leaf": [1, 2, 4], + "max_features": ["sqrt", "log2", None], + } + + # Gradient Boosting + gb_space = { + "n_estimators": [50, 100, 200], + "learning_rate": np.logspace(-3, 0, 10).tolist(), + "max_depth": [3, 5, 7, 9], + "subsample": np.linspace(0.6, 1.0, 5).tolist(), + } + + # SVM + svm_space = { + "C": np.logspace(-2, 2, 10).tolist(), + "gamma": np.logspace(-4, -1, 10).tolist(), + "kernel": ["rbf", "poly", "sigmoid"], + } + + +Neural Networks +^^^^^^^^^^^^^^^ + +.. code-block:: python + + nn_space = { + "hidden_layers": [1, 2, 3], + "hidden_size": [32, 64, 128, 256], + "learning_rate": np.logspace(-4, -2, 20).tolist(), + "dropout": np.linspace(0.0, 0.5, 6).tolist(), + "batch_size": [16, 32, 64, 128], + "activation": ["relu", "tanh", "elu"], + } + + +Iterative Refinement Strategy +----------------------------- + +A practical approach for finding optimal hyperparameters: + +**Phase 1: Coarse Search** + +.. code-block:: python + + # Wide ranges, few values + coarse_space = { + "learning_rate": [1e-4, 1e-3, 1e-2, 1e-1], + "hidden_size": [32, 128, 512], + "dropout": [0.0, 0.25, 0.5], + } + + optimizer = RandomSearch( + search_space=coarse_space, + n_iter=50, + experiment=objective, + ) + best = optimizer.solve() + # Result: learning_rate=1e-3 works best + +**Phase 2: Fine-tune Around Best Values** + +.. 
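code-block:: python + + # A hedged sketch: derive the narrowed log-range from the phase-1 result + # (assumes ``best`` is the parameter dict returned by ``optimizer.solve()`` + # in the phase-1 example above). + import numpy as np + + center = np.log10(best["learning_rate"]) # e.g. -3.0 for 1e-3 + lr_fine = np.logspace(center - 0.5, center + 0.5, 20).tolist() + +Or simply write the narrowed ranges out by hand: + +.. 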
code-block:: python + + # Narrow ranges around best from phase 1 + fine_space = { + "learning_rate": np.logspace(-3.5, -2.5, 20).tolist(), # Around 1e-3 + "hidden_size": list(range(96, 192, 16)), # Around 128 + "dropout": np.linspace(0.2, 0.4, 10).tolist(), # Around 0.25 + } + + optimizer = BayesianOptimizer( + search_space=fine_space, + n_iter=100, + experiment=objective, + ) + final_best = optimizer.solve() + + +Common Mistakes to Avoid +------------------------ + +**1. Overly Large Search Spaces** + +.. code-block:: python + + # Bad: 1000 * 1000 * 1000 = 1 billion combinations + bad_space = { + "param_a": np.linspace(0, 1, 1000).tolist(), + "param_b": np.linspace(0, 1, 1000).tolist(), + "param_c": np.linspace(0, 1, 1000).tolist(), + } + + # Better: 50 * 50 * 50 = 125,000 combinations + better_space = { + "param_a": np.linspace(0, 1, 50).tolist(), + "param_b": np.linspace(0, 1, 50).tolist(), + "param_c": np.linspace(0, 1, 50).tolist(), + } + +**2. Linear Spacing for Log-Scale Parameters** + +.. code-block:: python + + # Bad: most values clustered at high end + bad_lr = np.linspace(0.0001, 0.1, 20).tolist() + # Values: 0.0001, 0.0054, 0.0106, ... (poor coverage of small values) + + # Good: even distribution across magnitudes + good_lr = np.logspace(-4, -1, 20).tolist() + # Values: 0.0001, 0.00014, 0.00021, ... 0.07, 0.1 + +**3. Missing Important Values** + +.. code-block:: python + + # Bad: might miss optimal region entirely + bad_space = {"max_depth": [2, 3, 4]} + + # Better: include None and reasonable range + better_space = {"max_depth": [None, 3, 5, 10, 20, 50]} + +**4. Ignoring Parameter Interactions** + +Some parameters interact strongly. Consider them together: + +.. code-block:: python + + # Learning rate and batch size often interact + # Higher batch sizes often need higher learning rates + search_space = { + "batch_size": [16, 32, 64, 128], + "learning_rate": np.logspace(-4, -1, 20).tolist(), + } + # The optimizer will explore combinations to find the best pairing diff --git a/docs/tests/__init__.py b/docs/tests/__init__.py new file mode 100644 index 00000000..c5de4959 --- /dev/null +++ b/docs/tests/__init__.py @@ -0,0 +1 @@ +"""Documentation tests package.""" diff --git a/docs/tests/test_doc_snippets.py b/docs/tests/test_doc_snippets.py new file mode 100644 index 00000000..0a7ab2e9 --- /dev/null +++ b/docs/tests/test_doc_snippets.py @@ -0,0 +1,270 @@ +"""Test all documentation code snippets. + +This module discovers and tests all Python snippet files in the documentation. +Only snippets in directories with complete, runnable examples (currently +getting_started/, examples/, and installation/) are tested for execution. + +User guide snippets may contain illustrative code with placeholders and are +not required to be executable — they serve documentation purposes. +""" + +import importlib.util +import re +import subprocess +import sys +from pathlib import Path + +import pytest + +# Path to the snippets directory +SNIPPETS_DIR = Path(__file__).parent.parent / "source" / "_snippets" +# Path to the docs source directory +SOURCE_DIR = Path(__file__).parent.parent / "source" +# Path to the repository root +REPO_ROOT = Path(__file__).parent.parent.parent + + +def get_testable_snippet_files(): + """Collect Python files in testable directories. + + Only includes files from directories that contain complete, runnable examples. + Currently: getting_started/, examples/, installation/ + + Returns + ------- + list[Path] + List of paths to testable Python snippet files. 
+ """ + testable_dirs = ["getting_started", "examples", "installation"] + snippet_files = [] + + for dir_name in testable_dirs: + dir_path = SNIPPETS_DIR / dir_name + if dir_path.exists(): + for path in dir_path.rglob("*.py"): + if path.name not in ("__init__.py", "conftest.py"): + snippet_files.append(path) + + return sorted(snippet_files) + + +def get_all_snippet_files(): + """Collect all Python files in the snippets directory. + + Returns + ------- + list[Path] + List of paths to all Python snippet files. + """ + snippet_files = [] + for path in SNIPPETS_DIR.rglob("*.py"): + if path.name not in ("__init__.py", "conftest.py"): + snippet_files.append(path) + return sorted(snippet_files) + + +def _snippet_id(path): + """Generate a readable test ID for a snippet file.""" + return str(path.relative_to(SNIPPETS_DIR)) + + +@pytest.mark.parametrize("snippet_file", get_testable_snippet_files(), ids=_snippet_id) +def test_snippet_executes(snippet_file): + """Test that each runnable snippet file executes without errors. + + This runs each snippet as a subprocess to ensure isolation between tests + and to catch any import-time errors. + + Parameters + ---------- + snippet_file : Path + Path to the snippet file to test. + """ + result = subprocess.run( + [sys.executable, str(snippet_file)], + capture_output=True, + text=True, + timeout=120, # 2 minute timeout for optimization examples + cwd=str(SNIPPETS_DIR), + ) + + # Provide helpful error message on failure + if result.returncode != 0: + error_msg = f"Snippet {snippet_file.name} failed to execute.\n" + error_msg += f"stdout:\n{result.stdout}\n" + error_msg += f"stderr:\n{result.stderr}" + pytest.fail(error_msg) + + +@pytest.mark.parametrize("snippet_file", get_testable_snippet_files(), ids=_snippet_id) +def test_snippet_imports(snippet_file): + """Test that each runnable snippet file can be imported as a module. + + This catches syntax errors and import-time errors in a more controlled way. + + Parameters + ---------- + snippet_file : Path + Path to the snippet file to test. + """ + spec = importlib.util.spec_from_file_location( + f"snippet_{snippet_file.stem}", snippet_file + ) + if spec is None or spec.loader is None: + pytest.fail(f"Could not load spec for {snippet_file}") + + module = importlib.util.module_from_spec(spec) + try: + spec.loader.exec_module(module) + except Exception as e: + pytest.fail(f"Snippet {snippet_file.name} failed to import: {e}") + + +def test_all_snippets_have_markers(): + """Test that all snippet files contain proper start/end markers. + + This ensures that literalinclude directives can extract code properly. + """ + for snippet_file in get_all_snippet_files(): + content = snippet_file.read_text() + + # Check for at least one start/end pair + has_start = "# [start:" in content + has_end = "# [end:" in content + + if not (has_start and has_end): + pytest.fail( + f"Snippet {snippet_file.name} missing start/end markers. 
" + f"has_start={has_start}, has_end={has_end}" + ) + + +def test_snippet_markers_are_balanced(): + """Test that start/end markers are properly paired in each snippet file.""" + import re + + for snippet_file in get_all_snippet_files(): + content = snippet_file.read_text() + + starts = re.findall(r"# \[start:(\w+)\]", content) + ends = re.findall(r"# \[end:(\w+)\]", content) + + # Check that every start has a matching end + for marker in starts: + if marker not in ends: + pytest.fail( + f"Snippet {snippet_file.name} has unmatched start marker: {marker}" + ) + + for marker in ends: + if marker not in starts: + pytest.fail( + f"Snippet {snippet_file.name} has unmatched end marker: {marker}" + ) + + +def get_rst_files(): + """Collect all RST files in the source directory. + + Returns + ------- + list[Path] + List of paths to all RST files. + """ + return sorted(SOURCE_DIR.rglob("*.rst")) + + +def extract_github_file_links(content: str) -> list[tuple[str, str]]: + """Extract GitHub file links from RST content. + + Finds links of the form: + https://github.com/SimonBlanke/Hyperactive/blob/master/path/to/file.py + + Parameters + ---------- + content : str + RST file content. + + Returns + ------- + list[tuple[str, str]] + List of (full_url, relative_path) tuples. + """ + # Pattern matches GitHub blob URLs to this repo + pattern = r"https://github\.com/SimonBlanke/Hyperactive/blob/master/([^\s>`\"\']+)" + matches = re.findall(pattern, content) + return [ + (f"https://github.com/SimonBlanke/Hyperactive/blob/master/{path}", path) + for path in matches + ] + + +def test_github_example_links_exist(): + """Test that all GitHub example links in RST files point to existing files. + + This verifies that documentation links to example files are not broken. + Only checks links to files within this repository. + """ + broken_links = [] + + for rst_file in get_rst_files(): + content = rst_file.read_text() + links = extract_github_file_links(content) + + for full_url, rel_path in links: + local_path = REPO_ROOT / rel_path + if not local_path.exists(): + broken_links.append(f"{rst_file.name}: {rel_path} (file not found)") + + if broken_links: + msg = f"Found {len(broken_links)} broken GitHub file link(s):\n" + msg += "\n".join(f" - {link}" for link in broken_links) + pytest.fail(msg) + + +def extract_include_paths(content: str) -> list[str]: + """Extract include and literalinclude paths from RST content. + + Parameters + ---------- + content : str + RST file content. + + Returns + ------- + list[str] + List of relative paths referenced by include directives. + """ + # Match both include and literalinclude directives + # Format: .. include:: path or .. literalinclude:: path + pattern = r"\.\.\s+(?:include|literalinclude)::\s+([^\s\n]+)" + return re.findall(pattern, content) + + +def test_rst_includes_exist(): + """Test that all include/literalinclude paths in RST files exist. + + This catches broken include directives that reference non-existent files. 
+ """ + broken_includes = [] + + for rst_file in get_rst_files(): + # Skip auto-generated files and templates (they use Jinja placeholders) + if "auto_generated" in str(rst_file) or "_templates" in str(rst_file): + continue + + content = rst_file.read_text() + includes = extract_include_paths(content) + + for include_path in includes: + # Resolve path relative to the RST file's directory + full_path = rst_file.parent / include_path + if not full_path.exists(): + broken_includes.append( + f"{rst_file.relative_to(SOURCE_DIR)}: {include_path}" + ) + + if broken_includes: + msg = f"Found {len(broken_includes)} broken include path(s):\n" + msg += "\n".join(f" - {inc}" for inc in broken_includes) + pytest.fail(msg) diff --git a/pyproject.toml b/pyproject.toml index a11b720e..b7d5c6c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,7 +59,7 @@ build = [ "wheel", ] test = [ - "pytest == 9.0.1", + "pytest == 9.0.2", "flake8", "pytest-cov", "pathos", @@ -71,9 +71,19 @@ test_parallel_backends = [ "joblib", 'ray >=2.40.0; python_version < "3.13"', ] +docs = [ + "myst-parser", + "numpydoc", + "pydata-sphinx-theme", + "Sphinx>=7.0.0,<9.0.0", + "sphinx-copybutton", + "sphinx-design", + "sphinx-issues", +] all_extras = [ "hyperactive[integrations]", "optuna<5", + "cmaes", # Required for CmaEsOptimizer (optuna's CMA-ES sampler) "lightning", ] diff --git a/requirements/requirements-test.in b/requirements/requirements-test.in index 9b22ebe5..b9ce6794 100644 --- a/requirements/requirements-test.in +++ b/requirements/requirements-test.in @@ -1,4 +1,4 @@ -pytest == 9.0.1 +pytest == 9.0.2 flake8 pytest-cov pathos diff --git a/src/hyperactive/experiment/integrations/torch_lightning_experiment.py b/src/hyperactive/experiment/integrations/torch_lightning_experiment.py index 0bdd1f80..f0407c24 100644 --- a/src/hyperactive/experiment/integrations/torch_lightning_experiment.py +++ b/src/hyperactive/experiment/integrations/torch_lightning_experiment.py @@ -21,13 +21,17 @@ class TorchExperiment(BaseExperiment): Parameters ---------- - datamodule : L.LightningDataModule - A PyTorch Lightning DataModule that handles data loading and preparation. + data_module : type + A PyTorch Lightning DataModule class (not an instance) that + handles data loading and preparation. It will be instantiated + with hyperparameters during optimization. lightning_module : type A PyTorch Lightning Module class (not an instance) that will be instantiated with hyperparameters during optimization. trainer_kwargs : dict, optional (default=None) A dictionary of keyword arguments to pass to the PyTorch Lightning Trainer. + dm_kwargs : dict, optional (default=None) + A dictionary of keyword arguments to pass to the Data Module upon instantiation. objective_metric : str, optional (default='val_loss') The metric used to evaluate the model's performance. This should correspond to a metric logged in the LightningModule during validation. @@ -93,14 +97,12 @@ class TorchExperiment(BaseExperiment): ... def val_dataloader(self): ... return DataLoader(self.val, batch_size=self.batch_size) >>> - >>> datamodule = RandomDataModule(batch_size=16) - >>> datamodule.setup() - >>> >>> # Create Experiment >>> experiment = TorchExperiment( - ... datamodule=datamodule, + ... data_module=RandomDataModule, ... lightning_module=SimpleLightningModule, ... trainer_kwargs={'max_epochs': 3}, + ... dm_kwargs={'batch_size': 16}, ... objective_metric="val_loss" ... 
) >>> @@ -118,14 +120,16 @@ class TorchExperiment(BaseExperiment): def __init__( self, - datamodule, + data_module, lightning_module, trainer_kwargs=None, + dm_kwargs=None, objective_metric: str = "val_loss", ): - self.datamodule = datamodule + self.data_module = data_module self.lightning_module = lightning_module self.trainer_kwargs = trainer_kwargs or {} + self.dm_kwargs = dm_kwargs or {} self.objective_metric = objective_metric super().__init__() @@ -174,7 +178,8 @@ def _evaluate(self, params): try: model = self.lightning_module(**params) trainer = L.Trainer(**self._trainer_kwargs) - trainer.fit(model, self.datamodule) + data = self.data_module(**self.dm_kwargs) + trainer.fit(model, data) val_result = trainer.callback_metrics.get(self.objective_metric) metadata = {} @@ -265,10 +270,8 @@ def train_dataloader(self): def val_dataloader(self): return DataLoader(self.val, batch_size=self.batch_size) - datamodule = RandomDataModule(batch_size=16) - params = { - "datamodule": datamodule, + "data_module": RandomDataModule, "lightning_module": SimpleLightningModule, "trainer_kwargs": { "max_epochs": 1, @@ -276,6 +279,7 @@ def val_dataloader(self): "enable_model_summary": False, "logger": False, }, + "dm_kwargs": {"batch_size": 16}, "objective_metric": "val_loss", } @@ -339,10 +343,8 @@ def train_dataloader(self): def val_dataloader(self): return DataLoader(self.val, batch_size=self.batch_size) - datamodule2 = RegressionDataModule(batch_size=16, num_samples=150) - params2 = { - "datamodule": datamodule2, + "data_module": RegressionDataModule, "lightning_module": RegressionModule, "trainer_kwargs": { "max_epochs": 1, @@ -350,6 +352,7 @@ def val_dataloader(self): "enable_model_summary": False, "logger": False, }, + "dm_kwargs": {"batch_size": 8, "num_samples": 200}, "objective_metric": "val_loss", } @@ -370,4 +373,5 @@ def _get_score_params(cls): """ score_params1 = {"input_dim": 10, "hidden_dim": 20, "lr": 0.001} score_params2 = {"num_layers": 3, "hidden_size": 64, "dropout": 0.2} + return [score_params1, score_params2] diff --git a/src/hyperactive/integrations/sklearn/opt_cv.py b/src/hyperactive/integrations/sklearn/opt_cv.py index 2f0366fe..aae6ffc0 100644 --- a/src/hyperactive/integrations/sklearn/opt_cv.py +++ b/src/hyperactive/integrations/sklearn/opt_cv.py @@ -1,7 +1,6 @@ """opt_cv module for Hyperactive optimization.""" from collections.abc import Callable -from typing import Union from sklearn.base import BaseEstimator, clone @@ -107,7 +106,7 @@ def __init__( estimator, optimizer, *, - scoring: Union[Callable, str, None] = None, + scoring: Callable | str | None = None, refit: bool = True, cv=None, ): diff --git a/src/hyperactive/opt/optuna/_cmaes_optimizer.py b/src/hyperactive/opt/optuna/_cmaes_optimizer.py index 4e47f0ec..3c3c5788 100644 --- a/src/hyperactive/opt/optuna/_cmaes_optimizer.py +++ b/src/hyperactive/opt/optuna/_cmaes_optimizer.py @@ -128,6 +128,7 @@ def get_test_params(cls, parameter_set="default"): from sklearn.datasets import make_regression from sklearn.neural_network import MLPRegressor + from hyperactive.experiment.bench import Sphere from hyperactive.experiment.integrations import SklearnCvExperiment # Test case 1: Basic continuous parameters (from base) @@ -165,6 +166,8 @@ def get_test_params(cls, parameter_set="default"): ) # Test case 3: High-dimensional continuous space (CMA-ES strength) + # Use Sphere benchmark which expects x0, x1, ..., xN params + sphere_exp = Sphere(n_dim=6) high_dim_continuous = { f"x{i}": (-1.0, 1.0) for i in range(6) # 6D continuous 
optimization @@ -174,7 +177,7 @@ def get_test_params(cls, parameter_set="default"): { "param_space": high_dim_continuous, "n_trials": 12, - "experiment": mlp_exp, + "experiment": sphere_exp, "sigma0": 0.7, # Larger initial spread "n_startup_trials": 3, }