Add: Dynamic graph scaling for multi-vCPU servers
All checks were successful
Docker Build and Push (Multi-architecture) / build-and-push (push) Successful in 33s
@@ -14,6 +14,7 @@ from datetime import datetime, timedelta
 from typing import Dict, Tuple, Optional
 import io
 import logging
+import math

 # Get the logger from the main bot module
 logger = logging.getLogger('pterodisbot')
@@ -27,6 +28,7 @@ class ServerMetricsGraphs:
     - Generates PNG images of line graphs for Discord embedding
     - Automatic data rotation (FIFO queue with max 6 points)
     - Separate tracking for CPU percentage and memory MB usage
+    - Dynamic CPU scaling in 100% increments for multi-vCPU servers
     - Clean graph styling optimized for Discord dark theme
     """

@@ -70,6 +72,23 @@ class ServerMetricsGraphs:

         logger.debug(f"Added metrics data point for {self.server_name}: CPU={cpu_percent}%, Memory={memory_mb}MB")

+    def _calculate_cpu_scale_limit(self, max_cpu_value: float) -> int:
+        """
+        Calculate appropriate CPU scale limit in 100% increments.
+
+        Args:
+            max_cpu_value: Maximum CPU value in the dataset
+
+        Returns:
+            Scale limit rounded up to nearest 100% increment
+        """
+        if max_cpu_value <= 100:
+            return 100
+
+        # Round up to nearest 100% increment
+        # e.g., 150% -> 200%, 250% -> 300%, 350% -> 400%
+        return math.ceil(max_cpu_value / 100) * 100
+
     def generate_cpu_graph(self) -> Optional[io.BytesIO]:
         """
         Generate a CPU usage line graph as a PNG image.
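The new helper rounds the peak CPU reading up to the next full 100% step, roughly one step per vCPU. A minimal standalone sketch of that rounding rule, written outside the class purely for illustration:

import math

def cpu_scale_limit(max_cpu_value: float) -> int:
    # Illustrative copy of the _calculate_cpu_scale_limit logic added above
    if max_cpu_value <= 100:
        return 100
    return math.ceil(max_cpu_value / 100) * 100

# At or below a single core the axis keeps its default 0-100% range
assert cpu_scale_limit(45.0) == 100
assert cpu_scale_limit(100.0) == 100
# Multi-vCPU readings round up to the next 100% increment
assert cpu_scale_limit(150.0) == 200
assert cpu_scale_limit(250.0) == 300
assert cpu_scale_limit(400.0) == 400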
@@ -86,6 +105,10 @@
             timestamps = [point[0] for point in self.data_points]
             cpu_values = [point[1] for point in self.data_points]

+            # Calculate dynamic CPU scale limit
+            max_cpu = max(cpu_values)
+            cpu_scale_limit = self._calculate_cpu_scale_limit(max_cpu)
+
             # Create figure with dark theme styling
             plt.style.use('dark_background')
             fig, ax = plt.subplots(figsize=(8, 4), dpi=100)
@@ -96,9 +119,13 @@
             line = ax.plot(timestamps, cpu_values, color='#7289da', linewidth=2.5, marker='o', markersize=4)
             ax.fill_between(timestamps, cpu_values, alpha=0.3, color='#7289da')

-            # Customize axes
+            # Customize axes with dynamic scaling
             ax.set_ylabel('CPU Usage (%)', color='#ffffff', fontsize=10)
-            ax.set_ylim(0, max(100, max(cpu_values) * 1.1))  # Dynamic scaling with 100% minimum
+            ax.set_ylim(0, cpu_scale_limit)
+
+            # Add horizontal grid lines at 100% increments for better readability
+            for i in range(100, cpu_scale_limit + 1, 100):
+                ax.axhline(y=i, color='#ffffff', alpha=0.2, linestyle='--', linewidth=0.8)

             # Format time axis
             ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
@@ -113,8 +140,12 @@
             ax.spines['top'].set_visible(False)
             ax.spines['right'].set_visible(False)

-            # Add title
-            ax.set_title(f'{self.server_name} - CPU Usage', color='#ffffff', fontsize=12, pad=20)
+            # Add title with scale info for multi-vCPU servers
+            title = f'{self.server_name} - CPU Usage'
+            if cpu_scale_limit > 100:
+                estimated_vcpus = cpu_scale_limit // 100
+                title += f' (~{estimated_vcpus} vCPU cores)'
+            ax.set_title(title, color='#ffffff', fontsize=12, pad=20)

             # Tight layout to prevent label cutoff
             plt.tight_layout()
@@ -128,7 +159,7 @@
             # Clean up matplotlib resources
             plt.close(fig)

-            logger.debug(f"Generated CPU graph for {self.server_name}")
+            logger.debug(f"Generated CPU graph for {self.server_name} (scale: 0-{cpu_scale_limit}%)")
             return img_buffer

         except Exception as e:
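The graph code then clamps the y-axis to this limit, draws a dashed guide line at every 100% step, and appends an estimated core count to the title once the limit exceeds 100%. A small sketch of the derived values for a hypothetical peak of 250% CPU (server name and numbers are invented):

import math

max_cpu = 250.0                                    # hypothetical peak reading
cpu_scale_limit = math.ceil(max_cpu / 100) * 100   # -> 300, same rule as the helper

# Dashed guide lines land on each full-core boundary
assert list(range(100, cpu_scale_limit + 1, 100)) == [100, 200, 300]

# Title annotation mirrors the new title logic
title = 'Example Server - CPU Usage'
if cpu_scale_limit > 100:
    title += f' (~{cpu_scale_limit // 100} vCPU cores)'
assert title == 'Example Server - CPU Usage (~3 vCPU cores)'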
@@ -219,6 +250,10 @@
             cpu_values = [point[1] for point in self.data_points]
             memory_values = [point[2] for point in self.data_points]

+            # Calculate dynamic CPU scale limit
+            max_cpu = max(cpu_values)
+            cpu_scale_limit = self._calculate_cpu_scale_limit(max_cpu)
+
             # Create figure with two subplots
             plt.style.use('dark_background')
             fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6), dpi=100, sharex=True)
@@ -229,10 +264,20 @@
             ax1.plot(timestamps, cpu_values, color='#7289da', linewidth=2.5, marker='o', markersize=4)
             ax1.fill_between(timestamps, cpu_values, alpha=0.3, color='#7289da')
             ax1.set_ylabel('CPU Usage (%)', color='#ffffff', fontsize=10)
-            ax1.set_ylim(0, max(100, max(cpu_values) * 1.1))
+            ax1.set_ylim(0, cpu_scale_limit)
             ax1.tick_params(colors='#ffffff', labelsize=8)
             ax1.grid(True, alpha=0.3, color='#ffffff')
-            ax1.set_title(f'{self.server_name} - Resource Usage', color='#ffffff', fontsize=12)
+
+            # Add horizontal grid lines at 100% increments for CPU subplot
+            for i in range(100, cpu_scale_limit + 1, 100):
+                ax1.axhline(y=i, color='#ffffff', alpha=0.2, linestyle='--', linewidth=0.8)
+
+            # Title with vCPU info if applicable
+            title = f'{self.server_name} - Resource Usage'
+            if cpu_scale_limit > 100:
+                estimated_vcpus = cpu_scale_limit // 100
+                title += f' (~{estimated_vcpus} vCPU cores)'
+            ax1.set_title(title, color='#ffffff', fontsize=12)

             # Memory subplot
             ax2.set_facecolor('#36393f')
@@ -265,7 +310,7 @@

             plt.close(fig)

-            logger.debug(f"Generated combined graph for {self.server_name}")
+            logger.debug(f"Generated combined graph for {self.server_name} (CPU scale: 0-{cpu_scale_limit}%)")
             return img_buffer

         except Exception as e:
@@ -293,6 +338,11 @@
         latest_cpu = latest_point[1]
         latest_memory = latest_point[2]

+        # Calculate CPU scale info
+        max_cpu = max(point[1] for point in self.data_points)
+        cpu_scale_limit = self._calculate_cpu_scale_limit(max_cpu)
+        estimated_vcpus = cpu_scale_limit // 100
+
         # Calculate trends if we have multiple points
         cpu_trend = 'stable'
         memory_trend = 'stable'
@@ -316,6 +366,8 @@
             'latest_memory': latest_memory,
             'cpu_trend': cpu_trend,
             'memory_trend': memory_trend,
+            'cpu_scale_limit': cpu_scale_limit,
+            'estimated_vcpus': estimated_vcpus,
             'time_span_minutes': len(self.data_points) * 10 / 60  # Convert to minutes
         }

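The stats dictionary assembled here now exposes the scale information alongside the existing fields. An illustrative shape of that dictionary (values are invented; fields above this hunk are omitted):

example_stats = {
    # ...fields not shown in this hunk omitted...
    'latest_memory': 2048.0,     # MB, hypothetical value
    'cpu_trend': 'increasing',
    'memory_trend': 'stable',
    'cpu_scale_limit': 200,      # new: y-axis cap in percent
    'estimated_vcpus': 2,        # new: cpu_scale_limit // 100
    'time_span_minutes': 1.0,    # 6 data points * 10 / 60, per the formula above
}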
@@ -417,4 +469,4 @@ class ServerMetricsManager:
             'total_servers': len(self.server_graphs),
             'servers_with_data': sum(1 for graphs in self.server_graphs.values() if graphs.has_sufficient_data),
             'total_data_points': sum(len(graphs.data_points) for graphs in self.server_graphs.values())
-        }
+        }