Skip to content
This repository was archived by the owner on Apr 23, 2025. It is now read-only.
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 13 additions & 7 deletions docs/mcp_integration_plan.md
Original file line number Diff line number Diff line change
Expand Up @@ -167,12 +167,18 @@ This roadmap breaks the implementation into manageable milestones. Each mileston

For *each* milestone:

1. **Feature Branch:** All work will be done on a dedicated feature branch (e.g., `feature/mcp-client-setup`).
2. **Small Commits:** Use small, logical commits with clear messages.
3. **Code Coverage:** Maintain >80% unit test coverage for all *new* and *modified* code in both `cli-code` and the new `MCP Server` application. Coverage checks must pass in CI.
4. **Linting:** All code must pass configured linter checks (e.g., Ruff, MyPy) in CI.
5. **Pull Request:** Submit a Pull Request to `main` upon completion of the milestone.
6. **CI Pipeline:** The PR must pass all checks in the CI pipeline (tests, linting, coverage) before merging.
1. **Feature Branch:** All work for the milestone will be done on a dedicated feature branch (e.g., `feature/mcp-milestone-name`), created from the *latest* `main` branch.
2. **Small Commits:** Use small, logical commits with clear, conventional messages.
3. **Development:** Implement the features for the milestone.
4. **Local Testing:** Ensure code functions as expected locally.
5. **Unit Tests & Coverage:** Write unit tests for new/modified code. Maintain >80% coverage. Run tests locally (`pytest`).
6. **Linting:** Run linters locally (`ruff check .`, `ruff format .`). Fix issues.
7. **Commit & Push:** Commit the changes to the feature branch and push to the remote.
8. **Pull Request:** Create a Pull Request targeting the `main` branch.
9. **CI Pipeline (PR):** Verify that all CI checks (tests, linting, coverage) pass on the Pull Request.
10. **Merge:** Once CI passes and any required reviews are complete, merge the Pull Request into `main` (using squash-and-merge is recommended).
11. **CI Pipeline (Main):** Verify that the CI pipeline runs successfully on the `main` branch after the merge.
12. **Repeat:** For the next milestone, start again at step 1, ensuring the new feature branch is created from the updated `main` branch.

## 5. Additional Considerations

Expand All @@ -192,4 +198,4 @@ For *each* milestone:
* **Dependency (`chuk-mcp`):** Ensure compatibility and understand the maintenance status of the chosen MCP library.
* **Future Protocol Implementation:** While using `chuk-mcp` initially accelerates development, consider potentially developing an in-house MCP protocol handler in the future. This could offer more control and faster adaptation if the MCP standard evolves significantly or if `chuk-mcp` development lags behind desired features.
* **Performance:** Monitor for any noticeable latency introduced by the client-server hop, although it's expected to be minor for this use case.
```
```
67 changes: 67 additions & 0 deletions mcp_stub_server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import asyncio
import logging

from chuk_mcp import (
MCPError,
MCPMessage,
MCPServerProtocol,
MCPToolCallRequest,
MCPToolResult,
decode_mcp_message,
encode_mcp_message,
)

# Configure basic logging for the whole process: INFO level, timestamped lines.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
log = logging.getLogger(__name__)

# Loopback-only bind address and port for the stub server.
# NOTE(review): port 8999 appears hard-coded here — confirm the client is configured to match.
HOST = "127.0.0.1"
PORT = 8999


class StubMCPServerProtocol(MCPServerProtocol):
    """A very basic MCP server protocol that just logs and acknowledges messages.

    Recognised message types (``user_message``, ``tool_result``) get a canned
    ``assistant_message`` acknowledgement; everything else is only logged.
    """

    async def _acknowledge(self, incoming: MCPMessage, text: str) -> None:
        # Build and send an assistant_message, echoing the sender's routing
        # identifiers so the client can correlate the acknowledgement.
        reply = MCPMessage(
            message_type="assistant_message",
            agent_id=incoming.agent_id,
            session_id=incoming.session_id,
            payload={"text": text},
        )
        await self.send_message(reply)
        log.info(f"Stub Server Sent: {reply.model_dump_json(indent=2)}")

    async def handle_message(self, message: MCPMessage):
        """Log every incoming message and acknowledge the types we recognise."""
        log.info(f"Stub Server Received: {message.model_dump_json(indent=2)}")

        kind = message.message_type
        if kind == "user_message":
            snippet = message.payload.get("text", "")[:50]
            await self._acknowledge(
                message,
                f"Stub Server received your message: '{snippet}...'",
            )
        elif kind == "tool_result":
            tool_name = message.payload.get("tool_name", "")
            await self._acknowledge(
                message,
                f"Stub Server received tool result for {tool_name}",
            )
        # Other message types (tool_call_request, etc.) are intentionally
        # just logged by this stub.


async def main():
    """Start the Stub MCP Server on HOST:PORT and serve until cancelled.

    Bug fix: the original code called
    ``asyncio.start_server(lambda: StubMCPServerProtocol(), HOST, PORT)``.
    ``asyncio.start_server`` expects a ``(reader, writer)`` client-connected
    callback, so the zero-argument lambda would raise ``TypeError`` on the
    first client connection. A protocol class must instead be wired up with
    ``loop.create_server``, which takes a zero-argument protocol factory.
    """
    log.info(f"Starting Stub MCP Server on {HOST}:{PORT}...")
    loop = asyncio.get_running_loop()
    # The class itself serves as the protocol factory (called once per
    # connection). Assumes MCPServerProtocol is an asyncio protocol —
    # TODO(review): confirm against chuk_mcp's API.
    server = await loop.create_server(StubMCPServerProtocol, HOST, PORT)

    async with server:
        await server.serve_forever()


# Script entry point: run the server's event loop until interrupted.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the stub; log and exit cleanly.
        log.info("Stub MCP Server shutting down.")
19 changes: 11 additions & 8 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ authors = [
]
description = "An AI coding assistant CLI using Google's Gemini models with function calling."
readme = "README.md"
requires-python = ">=3.9" # Gemini library might require newer Python
requires-python = ">=3.11" # Gemini library might require newer Python; MCP requires >=3.11
license = "MIT"
classifiers = [
"Programming Language :: Python :: 3",
Expand All @@ -22,15 +22,16 @@ classifiers = [
"Topic :: Utilities",
]
dependencies = [
"google-generativeai>=0.5.0", # <<< ADDED/UPDATED GEMINI LIBRARY
"click>=8.0", # For CLI framework
"rich>=13.0", # For nice terminal output
"PyYAML>=6.0", # For config handling
"tiktoken>=0.6.0", # <-- ADDED TIKTOKEN DEPENDENCY
"questionary>=2.0.0", # <-- ADDED QUESTIONARY DEPENDENCY BACK
# "chuk-mcp>=0.1.0", # MCP client library - REMOVED
"google-generativeai>=0.5.0", # <<< ADDED/UPDATED GEMINI LIBRARY
"google-cloud-aiplatform", # Add vertexai dependency
"openai>=1.0.0", # Add openai library dependency
"protobuf>=4.0.0", # Add protobuf for schema conversion
"google-cloud-aiplatform", # Add vertexai dependency
"PyYAML>=6.0", # For config handling
"questionary>=2.0.0", # <-- ADDED QUESTIONARY DEPENDENCY BACK
"rich>=13.0", # For nice terminal output
"tiktoken>=0.6.0", # <-- ADDED TIKTOKEN DEPENDENCY
# Add any other direct dependencies your tools might have (e.g., requests for web_tools)
]

Expand All @@ -41,6 +42,8 @@ dev = [
"pytest>=7.0.0", # For running tests
"pytest-timeout>=2.2.0", # For test timeouts
"pytest-mock>=3.6.0", # Add pytest-mock dependency for mocker fixture
"pytest-cov>=4.0.0", # Add pytest-cov for coverage reporting
"pytest-asyncio>=0.21.0", # Add pytest-asyncio for async tests
"ruff>=0.1.0", # For linting and formatting
"protobuf>=4.0.0", # Also add to dev dependencies
"pre-commit>=3.5.0", # For pre-commit hooks
Expand Down Expand Up @@ -78,7 +81,7 @@ exclude = [
"__pycache__",
]
line-length = 120
target-version = "py39"
target-version = "py311"

[tool.ruff.lint]
# Enable a set of rules
Expand Down
Loading