Add: CI/CD testing automation
Some checks failed
CI/CD Pipeline / Unit Tests (Python 3.10) (push) Failing after 6m34s
CI/CD Pipeline / Unit Tests (Python 3.11) (push) Failing after 5m31s
CI/CD Pipeline / Unit Tests (Python 3.9) (push) Failing after 5m44s
CI/CD Pipeline / Code Quality & Linting (push) Successful in 48s
CI/CD Pipeline / Security Scanning (push) Successful in 17s
CI/CD Pipeline / Integration Tests (push) Has been skipped
CI/CD Pipeline / Build Docker Image (push) Has been skipped
CI/CD Pipeline / Generate Test Report (push) Successful in 13s
CI/CD Pipeline / CI/CD Pipeline Status (push) Successful in 1s

commit 08dee3db99 (parent 55971496c8)
2025-10-23 13:20:39 +00:00
6 changed files with 1075 additions and 141 deletions

.gitea/workflows/ci-cd.yml (new file, 400 lines)

name: CI/CD Pipeline

on:
  push:
    branches: [ main, experimental, dev ]
    tags: [ 'v*.*.*' ]
  pull_request:
    branches: [ main ]
  workflow_dispatch:
    inputs:
      skip_tests:
        description: 'Skip tests'
        required: false
        default: false
        type: boolean
      image_tag:
        description: 'Custom tag for Docker image'
        required: false
        default: 'latest'
        type: string
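
# Pipeline stages: testing (unit, lint, security, integration), build
# (multi-arch Docker image), and reporting (test summary plus an overall
# status gate).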
jobs:
  # ==========================================
  # TESTING STAGE
  # ==========================================
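  # Pytest suite run against Python 3.9, 3.10 and 3.11; fail-fast is disabled
  # so each version reports its own result, and a config.ini with placeholder
  # credentials is generated so the tests never need real secrets.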
  unit-tests:
    name: Unit Tests (Python ${{ matrix.python-version }})
    runs-on: ubuntu-latest
    if: ${{ !inputs.skip_tests }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.9', '3.10', '3.11']
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Cache pip dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-py${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
          restore-keys: |
            ${{ runner.os }}-py${{ matrix.python-version }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Create test configuration
        run: |
          mkdir -p embed logs
          cat > config.ini << EOF
          [Pterodactyl]
          PanelURL = https://panel.example.com
          ClientAPIKey = ptlc_test_client_key_123456789
          ApplicationAPIKey = ptla_test_app_key_987654321
          [Discord]
          Token = test_discord_token_placeholder
          AllowedGuildID = 123456789
          EOF

      - name: Run unit tests with coverage
        run: |
          pytest test_pterodisbot.py \
            -v \
            --tb=short \
            --cov=pterodisbot \
            --cov=server_metrics_graphs \
            --cov-report=xml \
            --cov-report=term \
            --cov-report=html \
            --junitxml=test-results-${{ matrix.python-version }}.xml

      - name: Upload coverage to artifacts
        uses: actions/upload-artifact@v3
        with:
          name: coverage-report-py${{ matrix.python-version }}
          path: |
            coverage.xml
            htmlcov/
            test-results-${{ matrix.python-version }}.xml
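
  # Lint and style checks (flake8, pylint, black, isort, mypy). Every tool
  # runs with continue-on-error, so findings are collected as artifacts
  # without failing the pipeline.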
  code-quality:
    name: Code Quality & Linting
    runs-on: ubuntu-latest
    if: ${{ !inputs.skip_tests }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install linting tools
        run: |
          python -m pip install --upgrade pip
          pip install flake8 pylint black isort mypy

      - name: Run flake8
        run: |
          flake8 pterodisbot.py server_metrics_graphs.py \
            --max-line-length=120 \
            --ignore=E501,W503,E203 \
            --exclude=venv,__pycache__,build,dist \
            --statistics \
            --output-file=flake8-report.txt
        continue-on-error: true

      - name: Run pylint
        run: |
          pylint pterodisbot.py server_metrics_graphs.py \
            --disable=C0111,C0103,R0913,R0914,R0915,W0718 \
            --max-line-length=120 \
            --output-format=text \
            --reports=y > pylint-report.txt || true
        continue-on-error: true

      - name: Check code formatting with black
        run: |
          black --check --line-length=120 --diff pterodisbot.py server_metrics_graphs.py | tee black-report.txt
        continue-on-error: true

      - name: Check import ordering
        run: |
          isort --check-only --profile black --line-length=120 pterodisbot.py server_metrics_graphs.py
        continue-on-error: true

      - name: Type checking with mypy
        run: |
          mypy pterodisbot.py server_metrics_graphs.py --ignore-missing-imports > mypy-report.txt || true
        continue-on-error: true

      - name: Upload linting reports
        uses: actions/upload-artifact@v3
        with:
          name: code-quality-reports
          path: |
            flake8-report.txt
            pylint-report.txt
            black-report.txt
            mypy-report.txt
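
  # Static security analysis with bandit plus dependency vulnerability checks
  # via safety and pip-audit; all results are advisory (continue-on-error) and
  # uploaded as JSON reports.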
  security-scan:
    name: Security Scanning
    runs-on: ubuntu-latest
    if: ${{ !inputs.skip_tests }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install security tools
        run: |
          python -m pip install --upgrade pip
          pip install bandit safety pip-audit

      - name: Run bandit security scan
        run: |
          bandit -r . \
            -f json \
            -o bandit-report.json \
            -ll \
            --exclude ./venv,./test_*.py,./tests
        continue-on-error: true

      - name: Run safety dependency check
        run: |
          pip install -r requirements.txt
          safety check --json --output safety-report.json || true
        continue-on-error: true

      - name: Run pip-audit
        run: |
          pip-audit --desc --format json --output pip-audit-report.json || true
        continue-on-error: true

      - name: Upload security reports
        uses: actions/upload-artifact@v3
        with:
          name: security-reports
          path: |
            bandit-report.json
            safety-report.json
            pip-audit-report.json
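
  # Runs only the TestIntegration subset, and only after the unit-test matrix
  # has passed, with a 60-second pytest timeout.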
  integration-tests:
    name: Integration Tests
    runs-on: ubuntu-latest
    needs: [unit-tests]
    if: ${{ !inputs.skip_tests }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Cache dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-integration-${{ hashFiles('requirements.txt') }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Create test configuration
        run: |
          mkdir -p embed logs
          cat > config.ini << EOF
          [Pterodactyl]
          PanelURL = https://panel.example.com
          ClientAPIKey = ptlc_test_client_key_123456789
          ApplicationAPIKey = ptla_test_app_key_987654321
          [Discord]
          Token = test_discord_token_placeholder
          AllowedGuildID = 123456789
          EOF

      - name: Run integration tests
        run: |
          pytest test_pterodisbot.py::TestIntegration \
            -v \
            --tb=short \
            --timeout=60

  # ==========================================
  # BUILD STAGE
  # ==========================================
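  # Builds and pushes a multi-arch (amd64/arm64) image. always() lets the job
  # run even if code-quality or security-scan fail, but unit tests must have
  # passed (or been skipped via skip_tests), and only push or workflow_dispatch
  # events qualify. The tag step maps the triggering ref to a primary tag
  # (latest, experimental, dev, or the version tag) and adds rolling vMAJOR and
  # vMAJOR.MINOR tags for releases.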
  docker-build:
    name: Build Docker Image
    runs-on: ubuntu-latest
    needs: [unit-tests, code-quality, security-scan]
    if: |
      always() &&
      (needs.unit-tests.result == 'success' || inputs.skip_tests) &&
      (github.event_name == 'push' || github.event_name == 'workflow_dispatch')
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        with:
          platforms: arm64

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          platforms: linux/amd64,linux/arm64
          driver-opts: |
            image=moby/buildkit:latest

      - name: Log in to registry
        uses: docker/login-action@v2
        with:
          registry: ${{ vars.REGISTRY }}
          username: ${{ secrets.REGISTRY_USERNAME }}
          password: ${{ secrets.REGISTRY_PASSWORD }}

      - name: Generate Docker image tags
        id: tags
        run: |
          IMAGE_NAME="${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}"
          if [ -n "${{ github.event.inputs.image_tag }}" ]; then
            PRIMARY_TAG="${{ github.event.inputs.image_tag }}"
          elif [[ ${{ github.ref }} == refs/tags/v* ]]; then
            PRIMARY_TAG="${GITHUB_REF#refs/tags/}"
          elif [[ ${{ github.ref }} == refs/heads/main ]]; then
            PRIMARY_TAG="latest"
          elif [[ ${{ github.ref }} == refs/heads/experimental ]]; then
            PRIMARY_TAG="experimental"
          elif [[ ${{ github.ref }} == refs/heads/dev ]]; then
            PRIMARY_TAG="dev"
          else
            PRIMARY_TAG="latest"
          fi
          TAGS="$IMAGE_NAME:$PRIMARY_TAG,$IMAGE_NAME:${{ github.sha }}"
          if [[ ${{ github.ref }} == refs/tags/v* ]]; then
            MAJOR_MINOR_TAG=$(echo "$PRIMARY_TAG" | sed -E 's/^v([0-9]+\.[0-9]+)\.[0-9]+.*$/v\1/')
            if [[ "$MAJOR_MINOR_TAG" != "$PRIMARY_TAG" ]]; then
              TAGS="$TAGS,$IMAGE_NAME:$MAJOR_MINOR_TAG"
            fi
            MAJOR_TAG=$(echo "$PRIMARY_TAG" | sed -E 's/^v([0-9]+)\.[0-9]+\.[0-9]+.*$/v\1/')
            if [[ "$MAJOR_TAG" != "$PRIMARY_TAG" ]]; then
              TAGS="$TAGS,$IMAGE_NAME:$MAJOR_TAG"
            fi
          fi
          echo "tags=$TAGS" >> $GITHUB_OUTPUT
          echo "Generated tags: $TAGS"

      - name: Build and push multi-arch image
        uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          cache-from: type=registry,ref=${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}:cache
          cache-to: type=registry,ref=${{ vars.REGISTRY }}/${{ github.repository_owner }}/${{ vars.IMAGE_NAME }}:cache,mode=max
          tags: ${{ steps.tags.outputs.tags }}
          labels: |
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}

  # ==========================================
  # REPORTING STAGE
  # ==========================================
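  # Collects the artifacts produced by the test jobs and writes a Markdown
  # summary of job results to the step summary; skipped when skip_tests is set.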
  test-report:
    name: Generate Test Report
    runs-on: ubuntu-latest
    needs: [unit-tests, code-quality, security-scan, integration-tests]
    if: always() && !inputs.skip_tests
    steps:
      - name: Download all artifacts
        uses: actions/download-artifact@v3

      - name: Generate test summary
        run: |
          echo "## 🧪 Test Results Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Job Status:" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ Unit Tests: \`${{ needs.unit-tests.result }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- 🎨 Code Quality: \`${{ needs.code-quality.result }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- 🔒 Security Scan: \`${{ needs.security-scan.result }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- 🔗 Integration Tests: \`${{ needs.integration-tests.result }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Artifacts Generated:" >> $GITHUB_STEP_SUMMARY
          echo "- Coverage reports (HTML & XML)" >> $GITHUB_STEP_SUMMARY
          echo "- Code quality reports (flake8, pylint, black)" >> $GITHUB_STEP_SUMMARY
          echo "- Security scan reports (bandit, safety)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Commit:** \`${{ github.sha }}\`" >> $GITHUB_STEP_SUMMARY
          echo "**Branch:** \`${{ github.ref_name }}\`" >> $GITHUB_STEP_SUMMARY
          echo "**Triggered by:** ${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
  final-status:
    name: CI/CD Pipeline Status
    runs-on: ubuntu-latest
    # unit-tests must be listed in needs, otherwise needs.unit-tests.result is
    # empty in the failure check below and the gate can never trip
    needs: [unit-tests, test-report, docker-build]
    if: always()
    steps:
      - name: Check pipeline status
        run: |
          echo "## 🚀 CI/CD Pipeline Complete" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          if [[ "${{ needs.docker-build.result }}" == "success" ]]; then
            echo "✅ **Docker image built and pushed successfully**" >> $GITHUB_STEP_SUMMARY
          elif [[ "${{ needs.docker-build.result }}" == "skipped" ]]; then
            echo "⏭️ **Docker build skipped**" >> $GITHUB_STEP_SUMMARY
          else
            echo "❌ **Docker build failed**" >> $GITHUB_STEP_SUMMARY
          fi
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Pipeline run:** ${{ github.run_number }}" >> $GITHUB_STEP_SUMMARY
          echo "**Workflow:** ${{ github.workflow }}" >> $GITHUB_STEP_SUMMARY

      - name: Fail if critical jobs failed
        if: |
          (needs.unit-tests.result == 'failure' && !inputs.skip_tests) ||
          needs.docker-build.result == 'failure'
        run: exit 1