5 changes: 4 additions & 1 deletion .gitignore
@@ -47,4 +47,7 @@ Thumbs.db
__pycache__
servers/mcp-neo4j-data-modeling/test.ipynb
servers/mcp-neo4j-data-modeling/src/mcp_neo4j_data_modeling/temp.html
mcp.json
mcp.json
servers/mcp-neo4j-cloud-aura-api/growth_models_smoke_test_results.txt
servers/mcp-neo4j-cloud-aura-api/*_smoke_test_results.txt
servers/mcp-neo4j-cloud-aura-api/*_test_results.json
3 changes: 3 additions & 0 deletions servers/mcp-neo4j-cloud-aura-api/CHANGELOG.md
@@ -3,8 +3,11 @@
### Fixed

### Changed
* Make `avg_properties_per_node` and `avg_properties_per_relationship` required parameters for the `calculate_database_sizing` tool to ensure accurate sizing calculations

### Added
* Add `calculate_database_sizing` tool for calculating Neo4j database sizing based on graph characteristics
* Add `forecast_database_size` tool for multi-year database size projections with workload-based growth models

## v0.4.6

41 changes: 41 additions & 0 deletions servers/mcp-neo4j-cloud-aura-api/README.md
@@ -106,6 +106,47 @@ The server offers these core tools:
- `tenant_id` (string): ID of the tenant/project to retrieve
- Returns: Detailed information about the tenant/project

#### 📊 Database Sizing Tools
- `calculate_database_sizing`
- Calculate current Neo4j database sizing based on graph characteristics
- **Required inputs:**
- `num_nodes` (integer): Number of nodes in the graph
- `num_relationships` (integer): Number of relationships in the graph
- `avg_properties_per_node` (integer): Average number of properties per node (REQUIRED for accurate sizing)
- `avg_properties_per_relationship` (integer): Average number of properties per relationship (REQUIRED for accurate sizing)
- **Optional inputs:**
- `total_num_large_node_properties` (integer): Total number of large properties (128+ bytes) across all nodes
- `total_num_large_reltype_properties` (integer): Total number of large properties (128+ bytes) across all relationships
- `vector_index_dimensions` (integer): Vector index dimensions if using vector search
- `percentage_nodes_with_vector_properties` (float): Percentage of nodes with vector properties (0-100)
- `number_of_vector_indexes` (integer): Number of vector indexes
- `quantization_enabled` (boolean): Enable scalar quantization for vectors (4x storage reduction)
- `memory_to_storage_ratio` (integer): Memory-to-storage ratio denominator (1=1:1, 2=1:2, 4=1:4, 8=1:8)
- `concurrent_end_users` (integer): Number of concurrent end users (used to calculate vCPUs at 2 vCPUs per user)
- Returns: Detailed sizing calculations including storage breakdown, recommended memory, and vCPUs
- **Note:** Property counts are required for accurate sizing; omitting them leads to significantly inaccurate results (see the sketch below).
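
A minimal sketch of calling this tool directly through the manager layer (normally an MCP client invokes it); the package path `mcp_neo4j_aura_manager` and the assumption that `AuraAPIClient` does no network work at construction time are illustrative, not guaranteed:

```python
# Hypothetical direct call to the manager layer; import path and
# lazy client construction are assumptions.
import asyncio

from mcp_neo4j_aura_manager.aura_manager import AuraManager  # assumed module path

async def main() -> None:
    manager = AuraManager(client_id="<client-id>", client_secret="<client-secret>")
    sizing = await manager.calculate_database_sizing(
        num_nodes=10_000_000,
        num_relationships=50_000_000,
        avg_properties_per_node=8,           # required for accurate sizing
        avg_properties_per_relationship=2,   # required for accurate sizing
        vector_index_dimensions=1536,
        percentage_nodes_with_vector_properties=25.0,
        number_of_vector_indexes=1,
        quantization_enabled=True,           # scalar quantization for vectors
        memory_to_storage_ratio=2,           # 1:2 memory-to-storage
        concurrent_end_users=10,             # drives the vCPU recommendation
    )
    print(sizing)  # storage breakdown, recommended memory, and vCPUs

asyncio.run(main())
```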

- `forecast_database_size`
- Forecast database size growth over multiple years using component-based growth models
- **Required inputs:**
- `base_size_gb` (float): Current database size in GB
- `base_memory_gb` (integer): Current recommended memory in GB
- `base_cores` (integer): Current recommended number of cores
- `domain` (string): Graph domain from "7 Graphs of the Enterprise" - **REQUIRED**, primary driver for growth model selection
- Options: `"customer"`, `"product"`, `"employee"`, `"supplier"`, `"transaction"`, `"process"`, `"security"`, `"generic"`
- **Optional inputs:**
- `annual_growth_rate` (float): Annual growth rate percentage. If not provided, uses smart defaults based on domain/workload:
- Transactional: 20%
- Agentic: 15%
- Analytical: 5%
- Domain-based defaults range from 3% to 20%
- `projection_years` (integer): Number of years to project (default: 3)
- `workloads` (array): Workload types that override domain-based growth models
- Options: `"transactional"` (fast growth - LogLinearGrowthModel for all components), `"agentic"` (medium growth - CompoundGrowthModel for all components), `"analytical"` (moderate growth - CompoundGrowthModel for storage, LinearGrowthModel for memory/vcpu)
- If provided, overrides domain-based model selection
- `memory_to_storage_ratio` (integer): Memory-to-storage ratio denominator (1=1:1, 2=1:2, 4=1:4, 8=1:8). Applied as constraint/floor for memory projections. Default: 1 (1:1 ratio)
- Returns: Multi-year projections that include the growth model used for each component (storage, memory, vCPU) and the projected size, memory, and cores for each year. Cores scale dynamically with workload type and storage growth (see the sketch below).
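
A minimal sketch of calling `forecast_database_size` through the manager layer, under the same assumptions as the previous sketch (package path and client construction behaviour are not guaranteed):

```python
# Hypothetical direct call to the manager layer; import path and
# lazy client construction are assumptions.
import asyncio

from mcp_neo4j_aura_manager.aura_manager import AuraManager  # assumed module path

async def main() -> None:
    manager = AuraManager(client_id="<client-id>", client_secret="<client-secret>")
    forecast = await manager.forecast_database_size(
        base_size_gb=120.0,            # current database size in GB
        base_memory_gb=64,
        base_cores=16,
        domain="transaction",          # primary driver for growth-model selection
        projection_years=5,
        workloads=["transactional"],   # overrides the domain-based models
        memory_to_storage_ratio=2,     # applied as a floor for memory projections
    )
    print(forecast)  # per-year size, memory, cores, and the models used

asyncio.run(main())
```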


## 🔧 Usage with Claude Desktop

@@ -1,14 +1,30 @@
from typing import Dict, Any, List
from typing import Dict, Any, List, Optional, Literal
from .aura_api_client import AuraAPIClient
from .sizing import AuraSizingService
from .utils import get_logger

logger = get_logger(__name__)

class AuraManager:
"""Service layer for the Aura API MCP Server."""

def __init__(self, client_id: str, client_secret: str):
def __init__(
self,
client_id: str,
client_secret: str,
sizing_service: Optional[AuraSizingService] = None
):
"""
Initialize the Aura Manager.

Args:
client_id: Aura API client ID
client_secret: Aura API client secret
sizing_service: Optional sizing service instance. If None, creates a default instance.
Useful for dependency injection in tests.
"""
self.client = AuraAPIClient(client_id, client_secret)
self.sizing_service = sizing_service or AuraSizingService()

async def list_instances(self, **kwargs) -> Dict[str, Any]:
"""List all Aura database instances."""
@@ -126,4 +142,72 @@ async def delete_instance(self, instance_id: str, **kwargs) -> Dict[str, Any]:
try:
return self.client.delete_instance(instance_id)
except Exception as e:
return {"error": str(e)}
return {"error": str(e)}

async def calculate_database_sizing(
self,
num_nodes: int,
num_relationships: int,
avg_properties_per_node: int,
avg_properties_per_relationship: int,
total_num_large_node_properties: Optional[int] = None,
total_num_large_reltype_properties: Optional[int] = None,
vector_index_dimensions: Optional[int] = None,
percentage_nodes_with_vector_properties: Optional[float] = None,
number_of_vector_indexes: Optional[int] = None,
quantization_enabled: bool = False,
memory_to_storage_ratio: Optional[int] = None,
concurrent_end_users: Optional[int] = None,
**kwargs
) -> Dict[str, Any]:
"""Calculate current database sizing based on graph metrics."""
try:
result = self.sizing_service.calculate_sizing(
num_nodes=num_nodes,
num_relationships=num_relationships,
avg_properties_per_node=avg_properties_per_node,
avg_properties_per_relationship=avg_properties_per_relationship,
total_num_large_node_properties=total_num_large_node_properties,
total_num_large_reltype_properties=total_num_large_reltype_properties,
vector_index_dimensions=vector_index_dimensions,
percentage_nodes_with_vector_properties=percentage_nodes_with_vector_properties,
number_of_vector_indexes=number_of_vector_indexes,
quantization_enabled=quantization_enabled,
memory_to_storage_ratio=memory_to_storage_ratio,
concurrent_end_users=concurrent_end_users,
)
# Convert Pydantic model to dict
return result.model_dump()
except Exception as e:
logger.error(f"Error calculating sizing: {str(e)}")
raise # Re-raise exception so FastMCP can handle it properly

async def forecast_database_size(
self,
base_size_gb: float,
base_memory_gb: int,
base_cores: int,
domain: str,
annual_growth_rate: float = 10.0,
projection_years: int = 3,
workloads: Optional[List[str]] = None,
memory_to_storage_ratio: int = 1,
**kwargs
) -> Dict[str, Any]:
"""Forecast database growth over multiple years."""
try:
result = self.sizing_service.forecast_sizing(
base_size_gb=base_size_gb,
base_memory_gb=base_memory_gb,
base_cores=base_cores,
domain=domain,
annual_growth_rate=annual_growth_rate,
projection_years=projection_years,
workloads=workloads,
memory_to_storage_ratio=memory_to_storage_ratio,
)
# Convert Pydantic model to dict
return result.model_dump()
except Exception as e:
logger.error(f"Error forecasting sizing: {str(e)}")
raise # Re-raise exception so FastMCP can handle it properly
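
The optional `sizing_service` argument exists mainly for dependency injection in tests. A minimal test sketch of how that might look, assuming the package path `mcp_neo4j_aura_manager` and that `AuraAPIClient` does not authenticate at construction time:

```python
# Hedged test sketch: a fake sizing service is injected so the real sizing
# logic never runs. Module path and client behaviour are assumptions.
import asyncio
from unittest.mock import MagicMock

from mcp_neo4j_aura_manager.aura_manager import AuraManager

def test_calculate_database_sizing_delegates_to_injected_service():
    # The manager calls sizing_service.calculate_sizing(...) and then
    # result.model_dump(), so the fake mirrors that shape.
    fake_result = MagicMock()
    fake_result.model_dump.return_value = {"recommended_memory_gb": 32}

    fake_service = MagicMock()
    fake_service.calculate_sizing.return_value = fake_result

    manager = AuraManager("client-id", "client-secret", sizing_service=fake_service)
    result = asyncio.run(
        manager.calculate_database_sizing(
            num_nodes=1_000_000,
            num_relationships=5_000_000,
            avg_properties_per_node=5,
            avg_properties_per_relationship=1,
        )
    )

    assert result == {"recommended_memory_gb": 32}
    fake_service.calculate_sizing.assert_called_once()
```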