1 Commit

Commit 08dee3db99 "Add: CI/CD testing automation" (2025-10-23 13:20:39 +00:00)
Some checks failed:
CI/CD Pipeline / Unit Tests (Python 3.10) (push) Failing after 6m34s
CI/CD Pipeline / Unit Tests (Python 3.11) (push) Failing after 5m31s
CI/CD Pipeline / Unit Tests (Python 3.9) (push) Failing after 5m44s
CI/CD Pipeline / Code Quality & Linting (push) Successful in 48s
CI/CD Pipeline / Security Scanning (push) Successful in 17s
CI/CD Pipeline / Integration Tests (push) Has been skipped
CI/CD Pipeline / Build Docker Image (push) Has been skipped
CI/CD Pipeline / Generate Test Report (push) Successful in 13s
CI/CD Pipeline / CI/CD Pipeline Status (push) Successful in 1s
6 changed files with 1075 additions and 141 deletions

.gitea/workflows/ci-cd.yml (new file, 400 lines)

@@ -0,0 +1,400 @@
name: CI/CD Pipeline
on:
push:
branches: [ main, experimental, dev ]
tags: [ 'v*.*.*' ]
pull_request:
branches: [ main ]
workflow_dispatch:
inputs:
skip_tests:
description: 'Skip tests'
required: false
default: 'false'
type: boolean
image_tag:
description: 'Custom tag for Docker image'
required: false
default: 'latest'
type: string
jobs:
# ==========================================
# TESTING STAGE
# ==========================================
unit-tests:
name: Unit Tests (Python ${{ matrix.python-version }})
runs-on: ubuntu-latest
if: ${{ !inputs.skip_tests }}
strategy:
fail-fast: false
matrix:
python-version: ['3.9', '3.10', '3.11']
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Cache pip dependencies
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-py${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
restore-keys: |
${{ runner.os }}-py${{ matrix.python-version }}-pip-
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools wheel
pip install -r requirements.txt
pip install -r requirements-test.txt
- name: Create test configuration
run: |
mkdir -p embed logs
cat > config.ini << EOF
[Pterodactyl]
PanelURL = https://panel.example.com
ClientAPIKey = ptlc_test_client_key_123456789
ApplicationAPIKey = ptla_test_app_key_987654321
[Discord]
Token = test_discord_token_placeholder
AllowedGuildID = 123456789
EOF
- name: Run unit tests with coverage
run: |
pytest test_pterodisbot.py \
-v \
--tb=short \
--cov=pterodisbot \
--cov=server_metrics_graphs \
--cov-report=xml \
--cov-report=term \
--cov-report=html \
--junitxml=test-results-${{ matrix.python-version }}.xml
- name: Upload coverage to artifacts
uses: actions/upload-artifact@v3
with:
name: coverage-report-py${{ matrix.python-version }}
path: |
coverage.xml
htmlcov/
test-results-${{ matrix.python-version }}.xml
code-quality:
name: Code Quality & Linting
runs-on: ubuntu-latest
if: ${{ !inputs.skip_tests }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install linting tools
run: |
python -m pip install --upgrade pip
pip install flake8 pylint black isort mypy
- name: Run flake8
run: |
flake8 pterodisbot.py server_metrics_graphs.py \
--max-line-length=120 \
--ignore=E501,W503,E203 \
--exclude=venv,__pycache__,build,dist \
--statistics \
--output-file=flake8-report.txt
continue-on-error: true
- name: Run pylint
run: |
pylint pterodisbot.py server_metrics_graphs.py \
--disable=C0111,C0103,R0913,R0914,R0915,W0718 \
--max-line-length=120 \
--output-format=text \
--reports=y > pylint-report.txt || true
continue-on-error: true
- name: Check code formatting with black
run: |
black --check --line-length=120 --diff pterodisbot.py server_metrics_graphs.py | tee black-report.txt
continue-on-error: true
- name: Check import ordering
run: |
isort --check-only --profile black --line-length=120 pterodisbot.py server_metrics_graphs.py
continue-on-error: true
- name: Type checking with mypy
run: |
mypy pterodisbot.py server_metrics_graphs.py --ignore-missing-imports > mypy-report.txt || true
continue-on-error: true
- name: Upload linting reports
uses: actions/upload-artifact@v3
with:
name: code-quality-reports
path: |
flake8-report.txt
pylint-report.txt
black-report.txt
mypy-report.txt
security-scan:
name: Security Scanning
runs-on: ubuntu-latest
if: ${{ !inputs.skip_tests }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install security tools
run: |
python -m pip install --upgrade pip
pip install bandit safety pip-audit
- name: Run bandit security scan
run: |
bandit -r . \
-f json \
-o bandit-report.json \
-ll \
--exclude ./venv,./test_*.py,./tests
continue-on-error: true
- name: Run safety dependency check
run: |
pip install -r requirements.txt
safety check --json --output safety-report.json || true
continue-on-error: true
- name: Run pip-audit
run: |
pip-audit --desc --format json --output pip-audit-report.json || true
continue-on-error: true
- name: Upload security reports
uses: actions/upload-artifact@v3
with:
name: security-reports
path: |
bandit-report.json
safety-report.json
pip-audit-report.json
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
needs: [unit-tests]
if: ${{ !inputs.skip_tests }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Cache dependencies
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-integration-${{ hashFiles('requirements.txt') }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-test.txt
- name: Create test configuration
run: |
mkdir -p embed logs
cat > config.ini << EOF
[Pterodactyl]
PanelURL = https://panel.example.com
ClientAPIKey = ptlc_test_client_key_123456789
ApplicationAPIKey = ptla_test_app_key_987654321
[Discord]
Token = test_discord_token_placeholder
AllowedGuildID = 123456789
EOF
- name: Run integration tests
run: |
pytest test_pterodisbot.py::TestIntegration \
-v \
--tb=short \
--timeout=60
# ==========================================
# BUILD STAGE
# ==========================================
docker-build:
name: Build Docker Image
runs-on: ubuntu-latest
needs: [unit-tests, code-quality, security-scan]
if: |
always() &&
(needs.unit-tests.result == 'success' || inputs.skip_tests) &&
(github.event_name == 'push' || github.event_name == 'workflow_dispatch')
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/amd64,linux/arm64
driver-opts: |
image=moby/buildkit:latest
- name: Log in to registry
uses: docker/login-action@v2
with:
registry: ${{ vars.REGISTRY }}
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Generate Docker image tags
id: tags
run: |
IMAGE_NAME="${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}"
if [ -n "${{ github.event.inputs.image_tag }}" ]; then
PRIMARY_TAG="${{ github.event.inputs.image_tag }}"
elif [[ ${{ github.ref }} == refs/tags/v* ]]; then
PRIMARY_TAG="${GITHUB_REF#refs/tags/}"
elif [[ ${{ github.ref }} == refs/heads/main ]]; then
PRIMARY_TAG="latest"
elif [[ ${{ github.ref }} == refs/heads/experimental ]]; then
PRIMARY_TAG="experimental"
elif [[ ${{ github.ref }} == refs/heads/dev ]]; then
PRIMARY_TAG="dev"
else
PRIMARY_TAG="latest"
fi
TAGS="$IMAGE_NAME:$PRIMARY_TAG,$IMAGE_NAME:${{ github.sha }}"
if [[ ${{ github.ref }} == refs/tags/v* ]]; then
MAJOR_MINOR_TAG=$(echo "$PRIMARY_TAG" | sed -E 's/^v([0-9]+\.[0-9]+)\.[0-9]+.*$/v\1/')
if [[ "$MAJOR_MINOR_TAG" != "$PRIMARY_TAG" ]]; then
TAGS="$TAGS,$IMAGE_NAME:$MAJOR_MINOR_TAG"
fi
MAJOR_TAG=$(echo "$PRIMARY_TAG" | sed -E 's/^v([0-9]+)\.[0-9]+\.[0-9]+.*$/v\1/')
if [[ "$MAJOR_TAG" != "$PRIMARY_TAG" ]]; then
TAGS="$TAGS,$IMAGE_NAME:$MAJOR_TAG"
fi
fi
echo "tags=$TAGS" >> $GITHUB_OUTPUT
echo "Generated tags: $TAGS"
- name: Build and push multi-arch image
uses: docker/build-push-action@v4
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
cache-from: type=registry,ref=${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}:cache
cache-to: type=registry,ref=${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}:cache,mode=max
tags: ${{ steps.tags.outputs.tags }}
labels: |
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
# ==========================================
# REPORTING STAGE
# ==========================================
test-report:
name: Generate Test Report
runs-on: ubuntu-latest
needs: [unit-tests, code-quality, security-scan, integration-tests]
if: always() && !inputs.skip_tests
steps:
- name: Download all artifacts
uses: actions/download-artifact@v3
- name: Generate test summary
run: |
echo "## 🧪 Test Results Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Job Status:" >> $GITHUB_STEP_SUMMARY
echo "- ✅ Unit Tests: \`${{ needs.unit-tests.result }}\`" >> $GITHUB_STEP_SUMMARY
echo "- 🎨 Code Quality: \`${{ needs.code-quality.result }}\`" >> $GITHUB_STEP_SUMMARY
echo "- 🔒 Security Scan: \`${{ needs.security-scan.result }}\`" >> $GITHUB_STEP_SUMMARY
echo "- 🔗 Integration Tests: \`${{ needs.integration-tests.result }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Artifacts Generated:" >> $GITHUB_STEP_SUMMARY
echo "- Coverage reports (HTML & XML)" >> $GITHUB_STEP_SUMMARY
echo "- Code quality reports (flake8, pylint, black)" >> $GITHUB_STEP_SUMMARY
echo "- Security scan reports (bandit, safety)" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Commit:** \`${{ github.sha }}\`" >> $GITHUB_STEP_SUMMARY
echo "**Branch:** \`${{ github.ref_name }}\`" >> $GITHUB_STEP_SUMMARY
echo "**Triggered by:** ${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
final-status:
name: CI/CD Pipeline Status
runs-on: ubuntu-latest
needs: [test-report, docker-build]
if: always()
steps:
- name: Check pipeline status
run: |
echo "## 🚀 CI/CD Pipeline Complete" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [[ "${{ needs.docker-build.result }}" == "success" ]]; then
echo "✅ **Docker image built and pushed successfully**" >> $GITHUB_STEP_SUMMARY
elif [[ "${{ needs.docker-build.result }}" == "skipped" ]]; then
echo "⏭️ **Docker build skipped**" >> $GITHUB_STEP_SUMMARY
else
echo "❌ **Docker build failed**" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Pipeline run:** ${{ github.run_number }}" >> $GITHUB_STEP_SUMMARY
echo "**Workflow:** ${{ github.workflow }}" >> $GITHUB_STEP_SUMMARY
- name: Fail if critical jobs failed
if: |
(needs.unit-tests.result == 'failure' && !inputs.skip_tests) ||
needs.docker-build.result == 'failure'
run: exit 1
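
The tag mapping in the "Generate Docker image tags" step can be traced with a small Python sketch of the same rules (illustrative only, not part of this commit; the image name below is a placeholder for the vars.REGISTRY / repository owner / vars.IMAGE_NAME values used in the workflow):

import re

def derive_tags(ref: str, sha: str, manual_tag: str = "",
                image: str = "registry.example.com/owner/image") -> list[str]:
    """Mirror of the branch/tag-to-Docker-tag mapping in ci-cd.yml."""
    # manual_tag stands in for the workflow_dispatch image_tag input
    if manual_tag:
        primary = manual_tag
    elif ref.startswith("refs/tags/v"):
        primary = ref.removeprefix("refs/tags/")
    elif ref == "refs/heads/main":
        primary = "latest"
    elif ref == "refs/heads/experimental":
        primary = "experimental"
    elif ref == "refs/heads/dev":
        primary = "dev"
    else:
        primary = "latest"

    tags = [f"{image}:{primary}", f"{image}:{sha}"]
    if ref.startswith("refs/tags/v"):
        # release tags additionally get rolling major.minor and major tags
        major_minor = re.sub(r"^v(\d+\.\d+)\.\d+.*$", r"v\1", primary)
        if major_minor != primary:
            tags.append(f"{image}:{major_minor}")
        major = re.sub(r"^v(\d+)\.\d+\.\d+.*$", r"v\1", primary)
        if major != primary:
            tags.append(f"{image}:{major}")
    return tags

# A v1.2.3 tag push fans out to :v1.2.3, :<sha>, :v1.2 and :v1
print(derive_tags("refs/tags/v1.2.3", "08dee3db99"))

A push to experimental, by contrast, publishes only the experimental tag and the commit SHA.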

Deleted file: previous Docker build workflow (89 lines removed)

@@ -1,89 +0,0 @@
name: Docker Build and Push (Multi-architecture)
on:
push:
branches: [ main, experimental ]
tags: [ 'v*.*.*' ]
workflow_dispatch:
inputs:
image_tag:
description: 'Custom tag for the Docker image'
required: true
default: 'latest'
type: string
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/amd64,linux/arm64
driver-opts: |
image=moby/buildkit:latest
- name: Log in to registry
uses: docker/login-action@v2
with:
registry: ${{ vars.REGISTRY }}
username: ${{ secrets.REGISTRY_USERNAME }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Generate Docker image tags
id: tags
run: |
# Base image name
IMAGE_NAME="${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}"
# Determine primary tag
if [ -n "${{ github.event.inputs.image_tag }}" ]; then
PRIMARY_TAG="${{ github.event.inputs.image_tag }}"
elif [[ ${{ github.ref }} == refs/tags/v* ]]; then
PRIMARY_TAG="${GITHUB_REF#refs/tags/}"
elif [[ ${{ github.ref }} == refs/heads/main ]]; then
PRIMARY_TAG="latest"
elif [[ ${{ github.ref }} == refs/heads/experimental ]]; then
PRIMARY_TAG="experimental"
else
PRIMARY_TAG="latest"
fi
# Start with primary tag and SHA tag
TAGS="$IMAGE_NAME:$PRIMARY_TAG,$IMAGE_NAME:${{ github.sha }}"
# Add version tags for releases
if [[ ${{ github.ref }} == refs/tags/v* ]]; then
# Add major.minor tag (e.g., v1.2 for v1.2.3)
MAJOR_MINOR_TAG=$(echo "$PRIMARY_TAG" | sed -E 's/^v([0-9]+\.[0-9]+)\.[0-9]+.*$/v\1/')
if [[ "$MAJOR_MINOR_TAG" != "$PRIMARY_TAG" ]]; then
TAGS="$TAGS,$IMAGE_NAME:$MAJOR_MINOR_TAG"
fi
# Add major tag (e.g., v1 for v1.2.3)
MAJOR_TAG=$(echo "$PRIMARY_TAG" | sed -E 's/^v([0-9]+)\.[0-9]+\.[0-9]+.*$/v\1/')
if [[ "$MAJOR_TAG" != "$PRIMARY_TAG" ]]; then
TAGS="$TAGS,$IMAGE_NAME:$MAJOR_TAG"
fi
fi
echo "tags=$TAGS" >> $GITHUB_OUTPUT
echo "Generated tags: $TAGS"
- name: Build and push multi-arch image
uses: docker/build-push-action@v4
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
cache-from: type=registry,ref=${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}:cache
cache-to: type=registry,ref=${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}:cache,mode=max
tags: ${{ steps.tags.outputs.tags }}

.gitignore (vendored, 76 lines changed)

@@ -4,6 +4,14 @@ __pycache__/
*.py[cod]
*$py.class
# IDEs
.vscode/
.idea/
*.swp
*.swo
*~
.DS_Store
# C extensions
*.so
@@ -37,20 +45,33 @@ MANIFEST
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
# Testing
__pycache__/
*.py[cod]
*$py.class
*.so
.pytest_cache/
.coverage
.coverage.*
.cache
nosetests.xml
htmlcov/
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
.tox/
.nox/
# Test reports
test-results*.xml
junit*.xml
*-report.txt
*-report.json
bandit-report.json
safety-report.json
pip-audit-report.json
flake8-report.txt
pylint-report.txt
black-report.txt
mypy-report.txt
# Translations
*.mo
@@ -83,37 +104,7 @@ target/
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
@@ -161,13 +152,6 @@ dmypy.json
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/

pterodisbot.py (modified)

@@ -782,9 +782,9 @@ class PterodactylBot(commands.Bot):
# Format limit values - display ∞ for unlimited (0 limit)
def format_limit(value, unit=""):
if value == 0:
return f"{'':<8}{unit}" # Lemniscate symbol for infinity
return f"{'':<8}]{unit}" # Lemniscate symbol for infinity
else:
return f"{value:<8}{unit}"
return f"{value:<8}]{unit}"
# Get uptime from Pterodactyl API (in milliseconds)
uptime_ms = resource_attributes.get('resources', {}).get('uptime', 0)
@@ -812,9 +812,9 @@ class PterodactylBot(commands.Bot):
# Create dedicated usage text box with current usage and limits in monospace font
usage_text = (
f"```properties\n"
f"CPU: {cpu_usage:>8} / {format_limit(cpu_limit, ' %')}\n"
f"Memory: {memory_usage:>8} / {format_limit(memory_limit, ' MiB')}\n"
f"Disk: {disk_usage:>8} / {format_limit(disk_limit, ' MiB')}\n"
f"CPU : [{cpu_usage:>8} / {format_limit(cpu_limit, ' %')}\n"
f"Memory : [{memory_usage:>8} / {format_limit(memory_limit, ' MiB')}\n"
f"Disk : [{disk_usage:>8} / {format_limit(disk_limit, ' MiB')}\n"
f"```"
)
@@ -1567,4 +1567,4 @@ if __name__ == "__main__":
sys.exit(1) # Exit with error code for crash
finally:
logger.info("Bot shutdown complete")
sys.exit(0) # Explicit clean exit
sys.exit(0) # Explicit clean exit
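
For reference, a minimal sketch of how the reworked usage box renders after this change. It assumes the '∞' placeholder restored above for unlimited (0) limits, plain numeric usage values, and the label padding shown; only the format strings are taken from the diff above:

def format_limit(value, unit=""):
    if value == 0:
        return f"{'∞':<8}]{unit}"  # lemniscate shown when the limit is unlimited
    return f"{value:<8}]{unit}"

cpu_usage, memory_usage, disk_usage = 45.5, 1024, 5120  # sample readings
usage_text = (
    "```properties\n"
    f"CPU    : [{cpu_usage:>8} / {format_limit(200, ' %')}\n"
    f"Memory : [{memory_usage:>8} / {format_limit(2048, ' MiB')}\n"
    f"Disk   : [{disk_usage:>8} / {format_limit(0, ' MiB')}\n"
    "```"
)
print(usage_text)
# CPU    : [    45.5 / 200     ] %
# Memory : [    1024 / 2048    ] MiB
# Disk   : [    5120 / ∞       ] MiB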

requirements-test.txt (new file, 26 lines)

@@ -0,0 +1,26 @@
# Testing Dependencies for Pterodactyl Discord Bot
# Core testing framework
pytest>=7.4.0
pytest-asyncio>=0.21.0
pytest-cov>=4.1.0
pytest-mock>=3.11.1
pytest-timeout>=2.1.0
# Code quality and linting
flake8>=6.0.0
pylint>=2.17.0
black>=23.7.0
isort>=5.12.0
# Security scanning
bandit>=1.7.5
safety>=2.3.5
# Mocking and fixtures
pytest-fixtures>=0.1.0
freezegun>=1.2.2
# Coverage reporting
coverage>=7.2.7
coverage-badge>=1.1.0
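
With these packages installed, the Unit Tests job's pytest invocation can be reproduced locally. A sketch using pytest's Python API (the coverage targets and flags mirror the CI step; the junit output filename is arbitrary):

import sys
import pytest

if __name__ == "__main__":
    # Run the test module with the same reporting options as the CI job
    sys.exit(pytest.main([
        "test_pterodisbot.py",
        "-v",
        "--tb=short",
        "--cov=pterodisbot",
        "--cov=server_metrics_graphs",
        "--cov-report=term",
        "--cov-report=html",
        "--junitxml=test-results-local.xml",
    ]))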

test_pterodisbot.py (new file, 613 lines)

@@ -0,0 +1,613 @@
"""
Unit and Integration Tests for Pterodactyl Discord Bot
Test coverage:
- Configuration validation
- Pterodactyl API client operations
- Discord bot commands and interactions
- Server metrics tracking
- Embed management
- Error handling
"""
import pytest
import asyncio
import json
import os
from unittest.mock import Mock, AsyncMock, patch, MagicMock
from datetime import datetime
import configparser
import discord
from discord.ext import commands
import aiohttp
# Import the modules to test
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from pterodisbot import (
PterodactylAPI,
ServerStatusView,
PterodactylBot,
ConfigValidationError,
validate_config,
REQUIRED_ROLE
)
from server_metrics_graphs import ServerMetricsGraphs, ServerMetricsManager
# ==========================================
# FIXTURES
# ==========================================
@pytest.fixture
def mock_config():
"""Create a mock configuration for testing."""
config = configparser.ConfigParser()
config['Pterodactyl'] = {
'PanelURL': 'https://panel.example.com',
'ClientAPIKey': 'ptlc_test_client_key_123',
'ApplicationAPIKey': 'ptla_test_app_key_456'
}
config['Discord'] = {
'Token': 'test_discord_token',
'AllowedGuildID': '123456789'
}
return config
@pytest.fixture
def mock_pterodactyl_api():
"""Create a mock PterodactylAPI instance."""
api = PterodactylAPI(
'https://panel.example.com',
'ptlc_test_client_key',
'ptla_test_app_key'
)
api.session = AsyncMock(spec=aiohttp.ClientSession)
return api
@pytest.fixture
def sample_server_data():
"""Sample server data from Pterodactyl API."""
return {
'attributes': {
'identifier': 'abc123',
'name': 'Test Server',
'description': 'A test game server',
'suspended': False,
'limits': {
'cpu': 200,
'memory': 2048,
'disk': 10240
}
}
}
@pytest.fixture
def sample_resources_data():
"""Sample resource usage data from Pterodactyl API."""
return {
'attributes': {
'current_state': 'running',
'resources': {
'cpu_absolute': 45.5,
'memory_bytes': 1073741824, # 1GB
'disk_bytes': 5368709120, # 5GB
'network_rx_bytes': 10485760, # 10MB
'network_tx_bytes': 5242880, # 5MB
'uptime': 3600000 # 1 hour in milliseconds
}
}
}
@pytest.fixture
def mock_discord_interaction():
"""Create a mock Discord interaction."""
interaction = AsyncMock(spec=discord.Interaction)
interaction.user = Mock()
interaction.user.name = 'TestUser'
interaction.user.roles = [Mock(name=REQUIRED_ROLE)]
interaction.guild_id = 123456789
interaction.channel = Mock()
interaction.channel.id = 987654321
interaction.response = AsyncMock()
interaction.followup = AsyncMock()
return interaction
# ==========================================
# CONFIGURATION VALIDATION TESTS
# ==========================================
class TestConfigValidation:
"""Test configuration validation logic."""
def test_valid_config(self, mock_config, monkeypatch):
"""Test that valid configuration passes validation."""
monkeypatch.setattr('pterodisbot.config', mock_config)
# Should not raise any exceptions
try:
validate_config()
except ConfigValidationError:
pytest.fail("Valid configuration should not raise ConfigValidationError")
def test_missing_pterodactyl_section(self, monkeypatch):
"""Test validation fails with missing Pterodactyl section."""
config = configparser.ConfigParser()
config['Discord'] = {
'Token': 'test_token',
'AllowedGuildID': '123456789'
}
monkeypatch.setattr('pterodisbot.config', config)
with pytest.raises(ConfigValidationError, match="Missing \\[Pterodactyl\\] section"):
validate_config()
def test_invalid_api_key_prefix(self, mock_config, monkeypatch):
"""Test validation fails with incorrect API key prefix."""
mock_config['Pterodactyl']['ClientAPIKey'] = 'invalid_prefix_key'
monkeypatch.setattr('pterodisbot.config', mock_config)
with pytest.raises(ConfigValidationError, match="ClientAPIKey should start with 'ptlc_'"):
validate_config()
def test_invalid_guild_id(self, mock_config, monkeypatch):
"""Test validation fails with invalid guild ID."""
mock_config['Discord']['AllowedGuildID'] = 'not_a_number'
monkeypatch.setattr('pterodisbot.config', mock_config)
with pytest.raises(ConfigValidationError, match="AllowedGuildID must be a valid integer"):
validate_config()
def test_invalid_panel_url(self, mock_config, monkeypatch):
"""Test validation fails with invalid panel URL."""
mock_config['Pterodactyl']['PanelURL'] = 'not-a-url'
monkeypatch.setattr('pterodisbot.config', mock_config)
with pytest.raises(ConfigValidationError, match="PanelURL must start with http"):
validate_config()
# ==========================================
# PTERODACTYL API TESTS
# ==========================================
class TestPterodactylAPI:
"""Test Pterodactyl API client functionality."""
@pytest.mark.asyncio
async def test_initialize(self):
"""Test API client initialization."""
api = PterodactylAPI('https://panel.example.com', 'ptlc_key', 'ptla_key')
await api.initialize()
assert api.session is not None
assert isinstance(api.session, aiohttp.ClientSession)
await api.close()
@pytest.mark.asyncio
async def test_close(self, mock_pterodactyl_api):
"""Test API client cleanup."""
await mock_pterodactyl_api.close()
mock_pterodactyl_api.session.close.assert_called_once()
@pytest.mark.asyncio
async def test_request_success(self, mock_pterodactyl_api):
"""Test successful API request."""
mock_response = AsyncMock()
mock_response.status = 200
mock_response.json = AsyncMock(return_value={'data': 'test'})
mock_pterodactyl_api.session.request = AsyncMock(return_value=mock_response)
mock_pterodactyl_api.session.request.return_value.__aenter__ = AsyncMock(return_value=mock_response)
mock_pterodactyl_api.session.request.return_value.__aexit__ = AsyncMock()
result = await mock_pterodactyl_api._request('GET', 'test/endpoint')
assert result == {'data': 'test'}
mock_pterodactyl_api.session.request.assert_called_once()
@pytest.mark.asyncio
async def test_request_error(self, mock_pterodactyl_api):
"""Test API request error handling."""
mock_response = AsyncMock()
mock_response.status = 404
mock_response.json = AsyncMock(return_value={
'errors': [{'detail': 'Server not found'}]
})
mock_pterodactyl_api.session.request = AsyncMock(return_value=mock_response)
mock_pterodactyl_api.session.request.return_value.__aenter__ = AsyncMock(return_value=mock_response)
mock_pterodactyl_api.session.request.return_value.__aexit__ = AsyncMock()
result = await mock_pterodactyl_api._request('GET', 'test/endpoint')
assert result['status'] == 'error'
assert 'Server not found' in result['message']
@pytest.mark.asyncio
async def test_get_servers(self, mock_pterodactyl_api, sample_server_data):
"""Test retrieving server list."""
mock_pterodactyl_api._request = AsyncMock(return_value={
'data': [sample_server_data]
})
servers = await mock_pterodactyl_api.get_servers()
assert len(servers) == 1
assert servers[0] == sample_server_data
mock_pterodactyl_api._request.assert_called_once_with(
'GET', 'application/servers', use_application_key=True
)
@pytest.mark.asyncio
async def test_get_server_resources(self, mock_pterodactyl_api, sample_resources_data):
"""Test retrieving server resource usage."""
mock_pterodactyl_api._request = AsyncMock(return_value=sample_resources_data)
resources = await mock_pterodactyl_api.get_server_resources('abc123')
assert resources['attributes']['current_state'] == 'running'
mock_pterodactyl_api._request.assert_called_once_with(
'GET', 'client/servers/abc123/resources'
)
@pytest.mark.asyncio
async def test_send_power_action_valid(self, mock_pterodactyl_api):
"""Test sending valid power action."""
mock_pterodactyl_api._request = AsyncMock(return_value={'status': 'success'})
result = await mock_pterodactyl_api.send_power_action('abc123', 'start')
assert result['status'] == 'success'
mock_pterodactyl_api._request.assert_called_once_with(
'POST', 'client/servers/abc123/power', {'signal': 'start'}
)
@pytest.mark.asyncio
async def test_send_power_action_invalid(self, mock_pterodactyl_api):
"""Test sending invalid power action."""
result = await mock_pterodactyl_api.send_power_action('abc123', 'invalid_action')
assert result['status'] == 'error'
assert 'Invalid action' in result['message']
# ==========================================
# SERVER METRICS TESTS
# ==========================================
class TestServerMetricsGraphs:
"""Test server metrics tracking and graphing."""
def test_initialization(self):
"""Test metrics graph initialization."""
graphs = ServerMetricsGraphs('abc123', 'Test Server')
assert graphs.server_id == 'abc123'
assert graphs.server_name == 'Test Server'
assert len(graphs.data_points) == 0
assert graphs.has_sufficient_data is False
def test_add_data_point(self):
"""Test adding data points."""
graphs = ServerMetricsGraphs('abc123', 'Test Server')
graphs.add_data_point(50.0, 1024.0)
assert len(graphs.data_points) == 1
assert graphs.has_sufficient_data is False
graphs.add_data_point(55.0, 1100.0)
assert len(graphs.data_points) == 2
assert graphs.has_sufficient_data is True
def test_data_rotation(self):
"""Test automatic data point rotation (FIFO with maxlen=6)."""
graphs = ServerMetricsGraphs('abc123', 'Test Server')
# Add 8 data points
for i in range(8):
graphs.add_data_point(float(i * 10), float(i * 100))
# Should only keep the last 6
assert len(graphs.data_points) == 6
assert graphs.data_points[0][1] == 20.0 # CPU of 3rd point
assert graphs.data_points[-1][1] == 70.0 # CPU of 8th point
def test_cpu_scale_calculation(self):
"""Test dynamic CPU scale limit calculation."""
graphs = ServerMetricsGraphs('abc123', 'Test Server')
# Test single vCPU (<=100%)
assert graphs._calculate_cpu_scale_limit(75.0) == 100
assert graphs._calculate_cpu_scale_limit(100.0) == 100
# Test multi-vCPU scenarios
assert graphs._calculate_cpu_scale_limit(150.0) == 200
assert graphs._calculate_cpu_scale_limit(250.0) == 300
assert graphs._calculate_cpu_scale_limit(350.0) == 400
def test_get_data_summary(self):
"""Test data summary generation."""
graphs = ServerMetricsGraphs('abc123', 'Test Server')
# No data
summary = graphs.get_data_summary()
assert summary['point_count'] == 0
assert summary['has_data'] is False
# Add data points with increasing trend
graphs.add_data_point(50.0, 1000.0)
graphs.add_data_point(60.0, 1100.0)
summary = graphs.get_data_summary()
assert summary['point_count'] == 2
assert summary['has_data'] is True
assert summary['latest_cpu'] == 60.0
assert summary['latest_memory'] == 1100.0
assert summary['cpu_trend'] == 'increasing'
def test_generate_graph_insufficient_data(self):
"""Test graph generation with insufficient data."""
graphs = ServerMetricsGraphs('abc123', 'Test Server')
# Only one data point - should return None
graphs.add_data_point(50.0, 1000.0)
assert graphs.generate_cpu_graph() is None
assert graphs.generate_memory_graph() is None
assert graphs.generate_combined_graph() is None
class TestServerMetricsManager:
"""Test server metrics manager."""
def test_initialization(self):
"""Test manager initialization."""
manager = ServerMetricsManager()
assert len(manager.server_graphs) == 0
def test_get_or_create_server_graphs(self):
"""Test getting or creating server graphs."""
manager = ServerMetricsManager()
graphs1 = manager.get_or_create_server_graphs('abc123', 'Test Server')
graphs2 = manager.get_or_create_server_graphs('abc123', 'Test Server')
assert graphs1 is graphs2 # Should return same instance
assert len(manager.server_graphs) == 1
def test_add_server_data(self):
"""Test adding data through manager."""
manager = ServerMetricsManager()
manager.add_server_data('abc123', 'Test Server', 50.0, 1024.0)
graphs = manager.get_server_graphs('abc123')
assert graphs is not None
assert len(graphs.data_points) == 1
def test_remove_server(self):
"""Test removing server from tracking."""
manager = ServerMetricsManager()
manager.add_server_data('abc123', 'Test Server', 50.0, 1024.0)
assert 'abc123' in manager.server_graphs
manager.remove_server('abc123')
assert 'abc123' not in manager.server_graphs
def test_cleanup_old_servers(self):
"""Test cleanup of inactive servers."""
manager = ServerMetricsManager()
# Add data for 3 servers
manager.add_server_data('server1', 'Server 1', 50.0, 1024.0)
manager.add_server_data('server2', 'Server 2', 60.0, 2048.0)
manager.add_server_data('server3', 'Server 3', 70.0, 3072.0)
# Only server1 and server2 are still active
manager.cleanup_old_servers(['server1', 'server2'])
assert 'server1' in manager.server_graphs
assert 'server2' in manager.server_graphs
assert 'server3' not in manager.server_graphs
def test_get_summary(self):
"""Test getting manager summary."""
manager = ServerMetricsManager()
# Add some servers with varying data
manager.add_server_data('server1', 'Server 1', 50.0, 1024.0)
manager.add_server_data('server1', 'Server 1', 55.0, 1100.0)
manager.add_server_data('server2', 'Server 2', 60.0, 2048.0)
summary = manager.get_summary()
assert summary['total_servers'] == 2
assert summary['servers_with_data'] == 1 # Only server1 has >=2 points
assert summary['total_data_points'] == 3
# ==========================================
# DISCORD BOT TESTS
# ==========================================
class TestServerStatusView:
"""Test Discord UI view for server status."""
@pytest.mark.asyncio
async def test_view_initialization(self, mock_pterodactyl_api, sample_server_data):
"""Test view initialization."""
view = ServerStatusView(
'abc123',
'Test Server',
mock_pterodactyl_api,
sample_server_data
)
assert view.server_id == 'abc123'
assert view.server_name == 'Test Server'
assert view.api is mock_pterodactyl_api
@pytest.mark.asyncio
async def test_interaction_check_authorized(self, mock_pterodactyl_api,
sample_server_data, mock_discord_interaction):
"""Test interaction check with authorized user."""
view = ServerStatusView('abc123', 'Test Server',
mock_pterodactyl_api, sample_server_data)
result = await view.interaction_check(mock_discord_interaction)
assert result is True
@pytest.mark.asyncio
async def test_interaction_check_wrong_guild(self, mock_pterodactyl_api,
sample_server_data, mock_discord_interaction):
"""Test interaction check with wrong guild."""
view = ServerStatusView('abc123', 'Test Server',
mock_pterodactyl_api, sample_server_data)
mock_discord_interaction.guild_id = 999999999 # Wrong guild
result = await view.interaction_check(mock_discord_interaction)
assert result is False
mock_discord_interaction.response.send_message.assert_called_once()
class TestPterodactylBot:
"""Test main bot class."""
@pytest.mark.asyncio
async def test_bot_initialization(self):
"""Test bot initialization."""
intents = discord.Intents.default()
bot = PterodactylBot(command_prefix="!", intents=intents)
assert bot.server_cache == {}
assert bot.embed_locations == {}
assert bot.metrics_manager is not None
@pytest.mark.asyncio
async def test_track_new_embed(self):
"""Test tracking new embed location."""
intents = discord.Intents.default()
bot = PterodactylBot(command_prefix="!", intents=intents)
mock_message = Mock()
mock_message.channel = Mock()
mock_message.channel.id = 123456
mock_message.id = 789012
with patch.object(bot, 'save_embed_locations', new=AsyncMock()):
await bot.track_new_embed('abc123', mock_message)
assert 'abc123' in bot.embed_locations
assert bot.embed_locations['abc123']['channel_id'] == '123456'
assert bot.embed_locations['abc123']['message_id'] == '789012'
@pytest.mark.asyncio
async def test_load_embed_locations(self, tmp_path):
"""Test loading embed locations from file."""
intents = discord.Intents.default()
bot = PterodactylBot(command_prefix="!", intents=intents)
# Create temporary embed locations file
embed_file = tmp_path / "embed_locations.json"
test_data = {
'abc123': {
'channel_id': '123456',
'message_id': '789012'
}
}
embed_file.write_text(json.dumps(test_data))
bot.embed_storage_path = embed_file
await bot.load_embed_locations()
assert 'abc123' in bot.embed_locations
assert bot.embed_locations['abc123']['channel_id'] == '123456'
@pytest.mark.asyncio
async def test_save_embed_locations(self, tmp_path):
"""Test saving embed locations to file."""
intents = discord.Intents.default()
bot = PterodactylBot(command_prefix="!", intents=intents)
embed_file = tmp_path / "embed_locations.json"
bot.embed_storage_path = embed_file
bot.embed_locations = {
'abc123': {
'channel_id': '123456',
'message_id': '789012'
}
}
await bot.save_embed_locations()
assert embed_file.exists()
loaded_data = json.loads(embed_file.read_text())
assert loaded_data == bot.embed_locations
# ==========================================
# INTEGRATION TESTS
# ==========================================
class TestIntegration:
"""Integration tests for complete workflows."""
@pytest.mark.asyncio
async def test_server_status_command_flow(self, mock_discord_interaction,
sample_server_data, sample_resources_data):
"""Test complete server status command flow."""
# This would require extensive mocking of Discord.py internals
# Simplified test to verify command registration
intents = discord.Intents.default()
bot = PterodactylBot(command_prefix="!", intents=intents)
# Verify command exists in tree
assert bot.tree is not None
@pytest.mark.asyncio
async def test_metrics_collection_and_graphing(self):
"""Test complete metrics collection and graph generation flow."""
manager = ServerMetricsManager()
# Simulate data collection over time
for i in range(6):
cpu = 50.0 + (i * 5)
memory = 1000.0 + (i * 100)
manager.add_server_data('test_server', 'Test Server', cpu, memory)
graphs = manager.get_server_graphs('test_server')
assert graphs is not None
assert graphs.has_sufficient_data
# Generate graphs
cpu_graph = graphs.generate_cpu_graph()
memory_graph = graphs.generate_memory_graph()
combined_graph = graphs.generate_combined_graph()
# Verify graphs were generated
assert cpu_graph is not None
assert memory_graph is not None
assert combined_graph is not None
# ==========================================
# RUN TESTS
# ==========================================
if __name__ == '__main__':
pytest.main([__file__, '-v', '--tb=short'])