14 Commits
v1.0.0 ... main

Author SHA1 Message Date
102814420b Merge pull request 'Fix: Update container image dependencies' (#7) from experimental into main
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 21s
Reviewed-on: #7
2025-09-29 05:17:26 +00:00
55971496c8 Fix: Matplotlib permission error
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 34s
2025-09-29 05:07:24 +00:00
135d596119 Fix: Dependency update for Docker image
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 1m53s
2025-09-29 04:52:17 +00:00
205c8eb9b7 Fix: Dependency update for Docker image
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 2m51s
2025-09-29 04:40:19 +00:00
f5528dcc9c Merge pull request 'Feature Add: Resource usage graphs' (#6) from experimental into main
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 22s
Reviewed-on: #6
2025-09-29 04:25:20 +00:00
ce77639a47 Add: Dynamic graph scaling for multi vCPU
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 33s
2025-09-29 04:07:33 +00:00
ce4887bae3 Fix: Delete graph image if server not running
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 36s
2025-09-28 18:41:50 +00:00
ca9e88f1e2 Improve formatting in embeds
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 34s
2025-09-28 17:10:03 +00:00
4b400fea1f Add server metrics graphing feature
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 5m25s
2025-09-28 16:11:43 +00:00
4260948c1c Merge pull request 'Update embed text formatting' (#4) from experimental into main
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 25s
Reviewed-on: #4
2025-09-26 12:40:59 +00:00
cbb951d121 Update embed text formatting
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 28s
2025-09-26 12:38:47 +00:00
a546540c45 Merge pull request 'Redesign server embed' (#3) from experimental into main
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 23s
Reviewed-on: #3
2025-09-26 11:18:59 +00:00
174c27c933 Redesign server embed
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 28s
2025-09-26 10:43:03 +00:00
1f7f211e36 Add server uptime metric to embed
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 23s
2025-09-26 08:52:41 +00:00
5 changed files with 2394 additions and 654 deletions

View File

@@ -18,13 +18,13 @@ COPY requirements.txt .
RUN --mount=type=cache,target=/root/.cache/pip \ RUN --mount=type=cache,target=/root/.cache/pip \
pip install --no-cache-dir -r requirements.txt pip install --no-cache-dir -r requirements.txt
# Final stage - using smaller base image # Final stage - using slim
FROM python:3.11-alpine3.18 FROM python:3.11-slim
# Install minimal runtime dependencies # Install minimal runtime dependencies
RUN apk add --no-cache \ RUN apt-get update && apt-get install -y --no-install-recommends \
tini \ tini \
&& rm -rf /var/cache/apk/* && rm -rf /var/lib/apt/lists/*
# Set working directory # Set working directory
WORKDIR /app WORKDIR /app
@@ -34,18 +34,22 @@ COPY --from=builder /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH" ENV PATH="/opt/venv/bin:$PATH"
# Create a non-root user # Create a non-root user
RUN addgroup -S bot && adduser -S bot -G bot RUN groupadd -r bot && useradd -r -g bot bot
# Copy necessary files # Copy necessary files
COPY --chown=bot:bot *.py ./ COPY --chown=bot:bot *.py ./
COPY --chown=bot:bot entrypoint.sh ./ COPY --chown=bot:bot entrypoint.sh ./
# Add other necessary directories/files as needed
# Create directories for persistent storage # Create directories for persistent storage
RUN mkdir -p logs embed && \ RUN mkdir -p logs embed && \
chown -R bot:bot /app logs embed && \ chown -R bot:bot /app logs embed && \
chmod -R 777 /app logs embed chmod -R 777 /app logs embed
# Create and set permissions for matplotlib config directory
RUN mkdir -p /tmp/matplotlib && \
chown -R bot:bot /tmp/matplotlib && \
chmod -R 777 /tmp/matplotlib
# Switch to non root user # Switch to non root user
USER bot USER bot
@@ -54,6 +58,8 @@ ENV PYTHONUNBUFFERED=1
ENV CONFIG_PATH=/app/config.ini ENV CONFIG_PATH=/app/config.ini
ENV PYTHONDONTWRITEBYTECODE=1 ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONPYCACHEPREFIX=/tmp ENV PYTHONPYCACHEPREFIX=/tmp
ENV MPLCONFIGDIR=/tmp/matplotlib
ENV MPLBACKEND=Agg
# Run the bot using tini and entrypoint script # Run the bot using tini and entrypoint script
ENTRYPOINT ["tini", "--", "/bin/sh", "entrypoint.sh"] ENTRYPOINT ["tini", "--", "/bin/sh", "entrypoint.sh"]

File diff suppressed because it is too large Load Diff

View File

@@ -30,7 +30,13 @@ import configparser
from datetime import datetime from datetime import datetime
from typing import Dict, List, Optional, Tuple from typing import Dict, List, Optional, Tuple
from pathlib import Path from pathlib import Path
import generate_config import matplotlib
matplotlib.use('Agg') # Use non-interactive backend for server environments
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from collections import deque
import io
from server_metrics_graphs import ServerMetricsGraphs, ServerMetricsManager
# ============================================== # ==============================================
# LOGGING SETUP # LOGGING SETUP
@@ -549,10 +555,11 @@ class PterodactylBot(commands.Bot):
self.server_cache: Dict[str, dict] = {} # Cache of server data from Pterodactyl self.server_cache: Dict[str, dict] = {} # Cache of server data from Pterodactyl
self.embed_locations: Dict[str, Dict[str, int]] = {} # Tracks where embeds are posted self.embed_locations: Dict[str, Dict[str, int]] = {} # Tracks where embeds are posted
self.update_lock = asyncio.Lock() # Prevents concurrent updates self.update_lock = asyncio.Lock() # Prevents concurrent updates
self.embed_storage_path = Path(EMBED_LOCATIONS_FILE) # File to store embed locations self.embed_storage_path = Path(EMBED_LOCATIONS_FILE) # File to store embed
self.metrics_manager = ServerMetricsManager() # Data manager for metrics graphing system
# Track previous server states and CPU usage to detect changes # Track previous server states and CPU usage to detect changes
# Format: {server_id: (state, cpu_usage)} # Format: {server_id: (state, cpu_usage, last_force_update)}
self.previous_states: Dict[str, Tuple[str, float]] = {} self.previous_states: Dict[str, Tuple[str, float, Optional[float]]] = {}
logger.info("Initialized PterodactylBot instance with state tracking") logger.info("Initialized PterodactylBot instance with state tracking")
async def setup_hook(self): async def setup_hook(self):
@@ -750,25 +757,104 @@ class PterodactylBot(commands.Bot):
timestamp=datetime.now() timestamp=datetime.now()
) )
embed.add_field(name="Server ID", value=identifier, inline=True) embed.add_field(name="🆔 Server ID", value=f"`{identifier}`", inline=True)
if is_suspended: if is_suspended:
embed.add_field(name="Status", value="⛔ Suspended", inline=True) embed.add_field(name=" Status", value="`Suspended`", inline=True)
else: else:
embed.add_field(name="Status", value="✅ Active", inline=True) embed.add_field(name=" Status", value="`Active`", inline=True)
# Add resource usage if server is running # Add resource usage if server is running
if current_state.lower() == "running": if current_state.lower() != "offline":
# Current usage
cpu_usage = round(resource_attributes.get('resources', {}).get('cpu_absolute', 0), 2) cpu_usage = round(resource_attributes.get('resources', {}).get('cpu_absolute', 0), 2)
memory_usage = round(resource_attributes.get('resources', {}).get('memory_bytes', 0) / (1024 ** 2), 2) memory_usage = round(resource_attributes.get('resources', {}).get('memory_bytes', 0) / (1024 ** 2), 2)
disk_usage = round(resource_attributes.get('resources', {}).get('disk_bytes', 0) / (1024 ** 2), 2) disk_usage = round(resource_attributes.get('resources', {}).get('disk_bytes', 0) / (1024 ** 2), 2)
network_rx = round(resource_attributes.get('resources', {}).get('network_rx_bytes', 0) / (1024 ** 2), 2) network_rx = round(resource_attributes.get('resources', {}).get('network_rx_bytes', 0) / (1024 ** 2), 2)
network_tx = round(resource_attributes.get('resources', {}).get('network_tx_bytes', 0) / (1024 ** 2), 2) network_tx = round(resource_attributes.get('resources', {}).get('network_tx_bytes', 0) / (1024 ** 2), 2)
embed.add_field(name="CPU Usage", value=f"{cpu_usage}%", inline=True) # Maximum allocated resources from server data
embed.add_field(name="Memory Usage", value=f"{memory_usage} MB", inline=True) limits = attributes.get('limits', {})
embed.add_field(name="Disk Usage", value=f"{disk_usage} MB", inline=True) cpu_limit = limits.get('cpu', 0)
embed.add_field(name="Network", value=f"⬇️ {network_rx} MB / ⬆️ {network_tx} MB", inline=False) memory_limit = limits.get('memory', 0)
disk_limit = limits.get('disk', 0)
# Format limit values - display ∞ for unlimited (0 limit)
def format_limit(value, unit=""):
if value == 0:
return f"{'∞':<8}{unit}" # Lemniscate symbol (∞) for infinity / unlimited limit
else:
return f"{value:<8}{unit}"
# Get uptime from Pterodactyl API (in milliseconds)
uptime_ms = resource_attributes.get('resources', {}).get('uptime', 0)
# Format uptime for display
if uptime_ms > 0:
uptime_seconds = uptime_ms // 1000 # Convert ms to seconds
if uptime_seconds < 60:
uptime_text = f"`{uptime_seconds}s`"
elif uptime_seconds < 3600:
uptime_text = f"`{uptime_seconds // 60}m {uptime_seconds % 60}s`"
elif uptime_seconds < 86400:
hours = uptime_seconds // 3600
minutes = (uptime_seconds % 3600) // 60
uptime_text = f"`{hours}h {minutes}m`"
else:
days = uptime_seconds // 86400
hours = (uptime_seconds % 86400) // 3600
uptime_text = f"`{days}d {hours}h`"
else:
uptime_text = "`Just started`"
embed.add_field(name="⏱️ Uptime", value=uptime_text, inline=True)
# Create dedicated usage text box with current usage and limits in monospace font
usage_text = (
f"```properties\n"
f"CPU: {cpu_usage:>8} / {format_limit(cpu_limit, ' %')}\n"
f"Memory: {memory_usage:>8} / {format_limit(memory_limit, ' MiB')}\n"
f"Disk: {disk_usage:>8} / {format_limit(disk_limit, ' MiB')}\n"
f"```"
)
embed.add_field(
name="📊 Resource Usage",
value=usage_text,
inline=False
)
embed.add_field(
name="Network In",
value=f"📥 `{network_rx} MiB`",
inline=True
)
embed.add_field(
name="Network Out",
value=f"📤 `{network_tx} MiB`",
inline=True
)
# Add graph images if available
server_graphs = self.metrics_manager.get_server_graphs(identifier)
if server_graphs and server_graphs.has_sufficient_data:
summary = server_graphs.get_data_summary()
graph_description = (
f">>> `Data points: {summary['point_count']}/6`\n"
f"`CPU trend: {summary['cpu_trend']} • Memory trend: {summary['memory_trend']}`"
)
# Add a field explaining the graphs
embed.add_field(
name="📈 Usage Trends (Last Minute)",
value=graph_description,
inline=False
)
# Set graph images (these will be attached as files in the update_status method)
embed.set_image(url=f"attachment://metrics_graph_{identifier}.png")
embed.set_footer(text="Last updated") embed.set_footer(text="Last updated")
@@ -790,6 +876,7 @@ class PterodactylBot(commands.Bot):
1. Server power state changes (started/stopped/restarted) 1. Server power state changes (started/stopped/restarted)
2. Significant CPU usage change (>50% difference) 2. Significant CPU usage change (>50% difference)
3. First time seeing the server 3. First time seeing the server
4. Server has been running for 10 minutes (force update for uptime)
This minimizes API calls to Discord and updates while maintaining This minimizes API calls to Discord and updates while maintaining
real-time awareness of important server changes. real-time awareness of important server changes.
@@ -806,12 +893,17 @@ class PterodactylBot(commands.Bot):
# Update our local cache with fresh server data # Update our local cache with fresh server data
self.server_cache = {server['attributes']['identifier']: server for server in servers} self.server_cache = {server['attributes']['identifier']: server for server in servers}
logger.debug(f"Updated server cache with {len(servers)} servers") logger.debug(f"Updated server cache with {len(servers)} servers")
# Clean up metrics for servers that no longer exist
active_server_ids = list(self.server_cache.keys())
self.metrics_manager.cleanup_old_servers(active_server_ids)
# Variables to track our update statistics # Variables to track our update statistics
update_count = 0 # Successful updates update_count = 0 # Successful updates
error_count = 0 # Failed updates error_count = 0 # Failed updates
missing_count = 0 # Missing embeds missing_count = 0 # Missing embeds
skipped_count = 0 # Servers that didn't need updates skipped_count = 0 # Servers that didn't need updates
current_time = datetime.now().timestamp()
# Process each server we're tracking embeds for # Process each server we're tracking embeds for
for server_id, location in list(self.embed_locations.items()): for server_id, location in list(self.embed_locations.items()):
@@ -830,9 +922,15 @@ class PterodactylBot(commands.Bot):
resources = await self.pterodactyl_api.get_server_resources(server_id) resources = await self.pterodactyl_api.get_server_resources(server_id)
current_state = resources.get('attributes', {}).get('current_state', 'offline') current_state = resources.get('attributes', {}).get('current_state', 'offline')
cpu_usage = round(resources.get('attributes', {}).get('resources', {}).get('cpu_absolute', 0), 2) cpu_usage = round(resources.get('attributes', {}).get('resources', {}).get('cpu_absolute', 0), 2)
# Retrieve previous recorded state and CPU usage # Collect metrics data for running servers
prev_state, prev_cpu = self.previous_states.get(server_id, (None, 0)) if current_state == 'running':
memory_usage = round(resources.get('attributes', {}).get('resources', {}).get('memory_bytes', 0) / (1024 ** 2), 2)
self.metrics_manager.add_server_data(server_id, server_name, cpu_usage, memory_usage)
logger.debug(f"Added metrics data for {server_name}: CPU={cpu_usage}%, Memory={memory_usage}MB")
# Retrieve previous recorded state, CPU usage, and last force update time
prev_state, prev_cpu, last_force_update = self.previous_states.get(server_id, (None, 0, None))
# DECISION LOGIC: Should we update the embed? # DECISION LOGIC: Should we update the embed?
needs_update = False needs_update = False
@@ -852,6 +950,15 @@ class PterodactylBot(commands.Bot):
logger.debug(f"First check for {server_name}, performing initial update") logger.debug(f"First check for {server_name}, performing initial update")
needs_update = True needs_update = True
# 4. Force update every 10 minutes for running servers (for uptime counter)
elif (current_state == 'running' and
(last_force_update is None or
current_time - last_force_update >= 600)): # 10 minutes = 600 seconds
logger.debug(f"Executing 10-minute force update for running server {server_name}")
needs_update = True
# Update the last force update time
last_force_update = current_time
# PERFORM UPDATE IF NEEDED # PERFORM UPDATE IF NEEDED
if needs_update: if needs_update:
# Generate fresh embed and view components # Generate fresh embed and view components
@@ -865,14 +972,38 @@ class PterodactylBot(commands.Bot):
# Fetch and update the existing message # Fetch and update the existing message
message = await channel.fetch_message(int(location['message_id'])) message = await channel.fetch_message(int(location['message_id']))
await message.edit(embed=embed, view=view)
# Check if server is transitioning to offline/stopping state
# and remove image attachment if present
files = []
server_graphs = self.metrics_manager.get_server_graphs(server_id)
# Only include graph images if server is running AND has sufficient data
if (current_state == 'running' and
server_graphs and
server_graphs.has_sufficient_data):
# Generate metrics graph
combined_graph = server_graphs.generate_combined_graph()
if combined_graph:
files.append(discord.File(combined_graph, filename=f"metrics_graph_{server_id}.png"))
logger.debug(f"Including metrics graph for running server {server_name}")
else:
# Server is offline/stopping - ensure no image is attached
logger.debug(f"Server {server_name} is {current_state}, removing image attachment if present")
# We'll update without files to remove any existing attachments
# Update message with embed, view, and files (empty files list removes attachments)
await message.edit(embed=embed, view=view, attachments=files)
update_count += 1 update_count += 1
logger.debug(f"Updated status for {server_name}") logger.debug(f"Updated status for {server_name}")
# Update our state tracking with new values # Update our state tracking with new values
self.previous_states[server_id] = (current_state, cpu_usage) # Only update last_force_update if this was a force update
new_last_force_update = last_force_update if needs_update and current_state == 'running' and current_time - (last_force_update or 0) >= 600 else (last_force_update if last_force_update is not None else None)
self.previous_states[server_id] = (current_state, cpu_usage, new_last_force_update)
else: else:
# No significant changes detected # No significant changes detected, but update tracking with current state
self.previous_states[server_id] = (current_state, cpu_usage, last_force_update)
skipped_count += 1 skipped_count += 1
logger.debug(f"No changes detected for {server_name}, skipping update") logger.debug(f"No changes detected for {server_name}, skipping update")

View File

@@ -1,4 +1,5 @@
discord.py>=2.3.0 discord.py>=2.3.0
aiohttp>=3.8.0 aiohttp>=3.8.0
configparser>=5.3.0 configparser>=5.3.0
python-dotenv python-dotenv
matplotlib

472
server_metrics_graphs.py Normal file
View File

@@ -0,0 +1,472 @@
"""
Server Metrics Graphs Module for Pterodactyl Discord Bot
This module provides graphing capabilities for server CPU and memory usage.
Generates line graphs as PNG images for embedding in Discord messages.
"""
import matplotlib
matplotlib.use('Agg') # Use non-interactive backend for server environments
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from collections import deque
from datetime import datetime, timedelta
from typing import Dict, Tuple, Optional
import io
import logging
import math
# Get the logger from the main bot module
logger = logging.getLogger('pterodisbot')
class ServerMetricsGraphs:
    """
    Manages CPU and memory usage graphs for a single server.

    Features:
    - Stores the last 6 data points (1 minute of history at 10-second intervals)
    - Generates PNG images of line graphs for Discord embedding
    - Automatic data rotation (FIFO deque with maxlen=6)
    - Separate tracking for CPU percentage and memory MB usage
    - Dynamic CPU scaling in 100% increments for multi-vCPU servers
    - Clean graph styling optimized for the Discord dark theme
    """

    # Discord dark-theme colours shared by every graph
    _FIG_BG = '#2f3136'     # figure background (Discord dark theme)
    _AX_BG = '#36393f'      # slightly lighter plot-area background
    _CPU_COLOR = '#7289da'  # blurple for CPU series
    _MEM_COLOR = '#43b581'  # green for memory series

    def __init__(self, server_id: str, server_name: str):
        """
        Initialize metrics tracking for a server.

        Args:
            server_id: Pterodactyl server identifier
            server_name: Human-readable server name
        """
        self.server_id = server_id
        self.server_name = server_name
        # Each entry is a tuple (timestamp, cpu_percent, memory_mb).
        # maxlen=6 gives automatic FIFO rotation of the oldest sample.
        self.data_points: deque = deque(maxlen=6)
        # A meaningful line graph needs at least 2 points.
        self.has_sufficient_data = False
        logger.debug(f"Initialized metrics tracking for server {server_name} ({server_id})")

    def add_data_point(self, cpu_percent: float, memory_mb: float, timestamp: Optional[datetime] = None):
        """
        Add a new data point to the metrics history.

        Args:
            cpu_percent: Current CPU usage percentage
            memory_mb: Current memory usage in megabytes
            timestamp: Optional timestamp, defaults to current time
        """
        if timestamp is None:
            timestamp = datetime.now()
        # Appending automatically evicts the oldest point (maxlen=6).
        self.data_points.append((timestamp, cpu_percent, memory_mb))
        self.has_sufficient_data = len(self.data_points) >= 2
        logger.debug(f"Added metrics data point for {self.server_name}: CPU={cpu_percent}%, Memory={memory_mb}MB")

    def _calculate_cpu_scale_limit(self, max_cpu_value: float) -> int:
        """
        Calculate the CPU y-axis limit in 100% increments.

        Args:
            max_cpu_value: Maximum CPU value in the dataset

        Returns:
            Scale limit rounded up to the nearest 100% increment,
            e.g. 150% -> 200%, 250% -> 300% (minimum 100%).
        """
        if max_cpu_value <= 100:
            return 100
        return math.ceil(max_cpu_value / 100) * 100

    def _style_axes(self, ax):
        """Apply the shared dark-theme styling (background, ticks, grid, spines)."""
        ax.set_facecolor(self._AX_BG)
        ax.tick_params(colors='#ffffff', labelsize=8)
        ax.grid(True, alpha=0.3, color='#ffffff')
        ax.spines['bottom'].set_color('#ffffff')
        ax.spines['left'].set_color('#ffffff')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)

    def _format_time_axis(self, ax):
        """Format the x axis as HH:MM:SS ticks every 20 seconds, rotated 45 degrees."""
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
        ax.xaxis.set_major_locator(mdates.SecondLocator(interval=20))
        plt.setp(ax.xaxis.get_majorticklabels(), rotation=45, ha='right', color='#ffffff', fontsize=8)

    def _export_figure(self, fig) -> io.BytesIO:
        """Render *fig* to a PNG BytesIO buffer and release the matplotlib figure."""
        plt.tight_layout()  # prevent label cutoff
        img_buffer = io.BytesIO()
        plt.savefig(img_buffer, format='png', facecolor=self._FIG_BG, edgecolor='none',
                    bbox_inches='tight', dpi=100)
        img_buffer.seek(0)
        plt.close(fig)  # free matplotlib resources
        return img_buffer

    def generate_cpu_graph(self) -> Optional[io.BytesIO]:
        """
        Generate a CPU usage line graph as a PNG image.

        Returns:
            BytesIO object containing PNG image data, or None if there are
            fewer than 2 data points or rendering fails.
        """
        if not self.has_sufficient_data:
            logger.debug(f"Insufficient data for CPU graph generation: {self.server_name}")
            return None
        try:
            timestamps = [point[0] for point in self.data_points]
            cpu_values = [point[1] for point in self.data_points]
            cpu_scale_limit = self._calculate_cpu_scale_limit(max(cpu_values))

            plt.style.use('dark_background')
            fig, ax = plt.subplots(figsize=(8, 4), dpi=100)
            fig.patch.set_facecolor(self._FIG_BG)

            # CPU line with a translucent fill underneath
            ax.plot(timestamps, cpu_values, color=self._CPU_COLOR, linewidth=2.5, marker='o', markersize=4)
            ax.fill_between(timestamps, cpu_values, alpha=0.3, color=self._CPU_COLOR)
            ax.set_ylabel('CPU Usage (%)', color='#ffffff', fontsize=10)
            ax.set_ylim(0, cpu_scale_limit)
            # Reference line at every full core (100%) for multi-vCPU readability
            for level in range(100, cpu_scale_limit + 1, 100):
                ax.axhline(y=level, color='#ffffff', alpha=0.2, linestyle='--', linewidth=0.8)

            self._format_time_axis(ax)
            self._style_axes(ax)

            # Mention the estimated core count when the scale exceeds one core
            title = f'{self.server_name} - CPU Usage'
            if cpu_scale_limit > 100:
                estimated_vcpus = cpu_scale_limit // 100
                title += f' (~{estimated_vcpus} vCPU cores)'
            ax.set_title(title, color='#ffffff', fontsize=12, pad=20)

            img_buffer = self._export_figure(fig)
            logger.debug(f"Generated CPU graph for {self.server_name} (scale: 0-{cpu_scale_limit}%)")
            return img_buffer
        except Exception as e:
            logger.error(f"Failed to generate CPU graph for {self.server_name}: {str(e)}")
            plt.close('all')  # clean up any half-built figures
            return None

    def generate_memory_graph(self) -> Optional[io.BytesIO]:
        """
        Generate a memory usage line graph as a PNG image.

        Returns:
            BytesIO object containing PNG image data, or None if there are
            fewer than 2 data points or rendering fails.
        """
        if not self.has_sufficient_data:
            logger.debug(f"Insufficient data for memory graph generation: {self.server_name}")
            return None
        try:
            timestamps = [point[0] for point in self.data_points]
            memory_values = [point[2] for point in self.data_points]

            plt.style.use('dark_background')
            fig, ax = plt.subplots(figsize=(8, 4), dpi=100)
            fig.patch.set_facecolor(self._FIG_BG)

            ax.plot(timestamps, memory_values, color=self._MEM_COLOR, linewidth=2.5, marker='o', markersize=4)
            ax.fill_between(timestamps, memory_values, alpha=0.3, color=self._MEM_COLOR)
            ax.set_ylabel('Memory Usage (MB)', color='#ffffff', fontsize=10)
            # Dynamic scaling with 10% padding; floor of 1 MB avoids a
            # degenerate (0, 0) axis when every sample is 0.
            ax.set_ylim(0, max(max(memory_values) * 1.1, 1.0))

            self._format_time_axis(ax)
            self._style_axes(ax)
            ax.set_title(f'{self.server_name} - Memory Usage', color='#ffffff', fontsize=12, pad=20)

            img_buffer = self._export_figure(fig)
            logger.debug(f"Generated memory graph for {self.server_name}")
            return img_buffer
        except Exception as e:
            logger.error(f"Failed to generate memory graph for {self.server_name}: {str(e)}")
            plt.close('all')  # clean up any half-built figures
            return None

    def generate_combined_graph(self) -> Optional[io.BytesIO]:
        """
        Generate a combined CPU and memory usage graph as a PNG image.

        Returns:
            BytesIO object containing PNG image data, or None if there are
            fewer than 2 data points or rendering fails.
        """
        if not self.has_sufficient_data:
            logger.debug(f"Insufficient data for combined graph generation: {self.server_name}")
            return None
        try:
            timestamps = [point[0] for point in self.data_points]
            cpu_values = [point[1] for point in self.data_points]
            memory_values = [point[2] for point in self.data_points]
            cpu_scale_limit = self._calculate_cpu_scale_limit(max(cpu_values))

            # Two stacked subplots sharing the time axis
            plt.style.use('dark_background')
            fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6), dpi=100, sharex=True)
            fig.patch.set_facecolor(self._FIG_BG)

            # --- CPU subplot ---
            ax1.plot(timestamps, cpu_values, color=self._CPU_COLOR, linewidth=2.5, marker='o', markersize=4)
            ax1.fill_between(timestamps, cpu_values, alpha=0.3, color=self._CPU_COLOR)
            ax1.set_ylabel('CPU Usage (%)', color='#ffffff', fontsize=10)
            ax1.set_ylim(0, cpu_scale_limit)
            for level in range(100, cpu_scale_limit + 1, 100):
                ax1.axhline(y=level, color='#ffffff', alpha=0.2, linestyle='--', linewidth=0.8)

            title = f'{self.server_name} - Resource Usage'
            if cpu_scale_limit > 100:
                estimated_vcpus = cpu_scale_limit // 100
                title += f' (~{estimated_vcpus} vCPU cores)'
            ax1.set_title(title, color='#ffffff', fontsize=12)

            # --- Memory subplot ---
            ax2.plot(timestamps, memory_values, color=self._MEM_COLOR, linewidth=2.5, marker='o', markersize=4)
            ax2.fill_between(timestamps, memory_values, alpha=0.3, color=self._MEM_COLOR)
            ax2.set_ylabel('Memory (MB)', color='#ffffff', fontsize=10)
            # Floor of 1 MB avoids a degenerate (0, 0) axis when idle
            ax2.set_ylim(0, max(max(memory_values) * 1.1, 1.0))

            # Time labels only on the bottom subplot (axes share x)
            self._format_time_axis(ax2)
            for ax in (ax1, ax2):
                self._style_axes(ax)

            img_buffer = self._export_figure(fig)
            logger.debug(f"Generated combined graph for {self.server_name} (CPU scale: 0-{cpu_scale_limit}%)")
            return img_buffer
        except Exception as e:
            logger.error(f"Failed to generate combined graph for {self.server_name}: {str(e)}")
            plt.close('all')  # clean up any half-built figures
            return None

    def get_data_summary(self) -> Dict[str, object]:
        """
        Get summary statistics for the current data points.

        Returns:
            Dictionary containing data point count, latest values, trends,
            and CPU scale info. The empty case carries the same trend keys
            as the populated case so callers can index them unconditionally.
        """
        if not self.data_points:
            return {
                'point_count': 0,
                'has_data': False,
                'latest_cpu': 0,
                'latest_memory': 0,
                'cpu_trend': 'stable',
                'memory_trend': 'stable',
            }

        # Latest sample
        latest_point = self.data_points[-1]
        latest_cpu = latest_point[1]
        latest_memory = latest_point[2]

        # CPU scale info (estimated core count from the 100%-per-core scale)
        max_cpu = max(point[1] for point in self.data_points)
        cpu_scale_limit = self._calculate_cpu_scale_limit(max_cpu)
        estimated_vcpus = cpu_scale_limit // 100

        # Trends compare the newest sample against the oldest retained one
        cpu_trend = 'stable'
        memory_trend = 'stable'
        if len(self.data_points) >= 2:
            first_point = self.data_points[0]
            cpu_change = latest_cpu - first_point[1]
            memory_change = latest_memory - first_point[2]
            # >5% CPU or >50 MB memory change is considered significant
            if abs(cpu_change) > 5:
                cpu_trend = 'increasing' if cpu_change > 0 else 'decreasing'
            if abs(memory_change) > 50:
                memory_trend = 'increasing' if memory_change > 0 else 'decreasing'

        return {
            'point_count': len(self.data_points),
            'has_data': self.has_sufficient_data,
            'latest_cpu': latest_cpu,
            'latest_memory': latest_memory,
            'cpu_trend': cpu_trend,
            'memory_trend': memory_trend,
            'cpu_scale_limit': cpu_scale_limit,
            'estimated_vcpus': estimated_vcpus,
            'time_span_minutes': len(self.data_points) * 10 / 60  # samples are 10s apart
        }
class ServerMetricsManager:
    """
    Global manager for all server metrics graphs.

    Handles:
    - Creation and cleanup of ServerMetricsGraphs instances
    - Bulk operations across all tracked servers
    - Memory management for graph storage
    """

    def __init__(self):
        """Initialize the metrics manager with an empty registry."""
        self.server_graphs: Dict[str, ServerMetricsGraphs] = {}
        logger.info("Initialized ServerMetricsManager")

    def get_or_create_server_graphs(self, server_id: str, server_name: str) -> ServerMetricsGraphs:
        """
        Get the existing ServerMetricsGraphs instance or create a new one.

        Args:
            server_id: Pterodactyl server identifier
            server_name: Human-readable server name

        Returns:
            ServerMetricsGraphs instance for the specified server
        """
        if server_id not in self.server_graphs:
            self.server_graphs[server_id] = ServerMetricsGraphs(server_id, server_name)
            logger.debug(f"Created new metrics graphs for server {server_name}")
        return self.server_graphs[server_id]

    def add_server_data(self, server_id: str, server_name: str, cpu_percent: float, memory_mb: float):
        """
        Add a data point to a server's metrics tracking, creating the
        tracking instance on first use.

        Args:
            server_id: Pterodactyl server identifier
            server_name: Human-readable server name
            cpu_percent: Current CPU usage percentage
            memory_mb: Current memory usage in megabytes
        """
        graphs = self.get_or_create_server_graphs(server_id, server_name)
        graphs.add_data_point(cpu_percent, memory_mb)

    def remove_server(self, server_id: str):
        """
        Remove a server from metrics tracking (no-op if not tracked).

        Args:
            server_id: Pterodactyl server identifier to remove
        """
        if server_id in self.server_graphs:
            del self.server_graphs[server_id]
            logger.debug(f"Removed metrics tracking for server {server_id}")

    def get_server_graphs(self, server_id: str) -> Optional[ServerMetricsGraphs]:
        """
        Get the ServerMetricsGraphs instance for a specific server.

        Args:
            server_id: Pterodactyl server identifier

        Returns:
            ServerMetricsGraphs instance or None if not found
        """
        return self.server_graphs.get(server_id)

    def cleanup_old_servers(self, active_server_ids: list):
        """
        Remove tracking for servers that no longer exist.

        Args:
            active_server_ids: List of currently active server IDs
        """
        # Set lookup makes the membership test O(1) per tracked server.
        active = set(active_server_ids)
        stale = [sid for sid in self.server_graphs if sid not in active]
        for sid in stale:
            self.remove_server(sid)
        if stale:
            logger.info(f"Cleaned up metrics for {len(stale)} inactive servers")

    def get_summary(self) -> Dict[str, object]:
        """
        Get a summary of all tracked servers.

        Returns:
            Dictionary with tracking statistics (counts of servers, servers
            with enough data to graph, and total stored data points)
        """
        return {
            'total_servers': len(self.server_graphs),
            'servers_with_data': sum(1 for graphs in self.server_graphs.values() if graphs.has_sufficient_data),
            'total_data_points': sum(len(graphs.data_points) for graphs in self.server_graphs.values())
        }