[AI: Windsurf] add: Add report generation for performance tests #1

name: CI/CD Pipeline

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]
  schedule:
    - cron: '0 0 * * 0' # Runs every Sunday at 00:00 UTC

defaults:
  run:
    shell: bash
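
# Because bash is the default shell above, run steps use Git Bash even on
# windows-latest, where virtualenvs place executables under .venv/Scripts
# rather than .venv/bin (the test job below accounts for this).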
jobs:
  test:
    name: Run Tests
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.8', '3.9', '3.10']
        exclude:
          # Python 3.8 is exercised on Linux only
          - os: windows-latest
            python-version: '3.8'
          - os: macos-latest
            python-version: '3.8'
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install system dependencies
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y python3-dev default-libmysqlclient-dev build-essential
      - name: Set up virtual environment
        run: |
          python -m venv .venv
          # On Windows the venv layout uses Scripts/ instead of bin/
          if [ "$RUNNER_OS" = "Windows" ]; then
            source .venv/Scripts/activate
          else
            source .venv/bin/activate
          fi
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-dev.txt
          pip install -r requirements-test.txt
      - name: Run tests with coverage
        run: |
          if [ "$RUNNER_OS" = "Windows" ]; then
            source .venv/Scripts/activate
          else
            source .venv/bin/activate
          fi
          python -m pytest --cov=src --cov-report=xml --cov-report=term-missing
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage.xml
          fail_ci_if_error: true
  lint:
    name: Lint Code
    runs-on: ubuntu-latest
    needs: test
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements-dev.txt
      - name: Run linters
        run: |
          # Flake8: hard errors first, then style and complexity
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          flake8 . --count --max-complexity=10 --max-line-length=127 --statistics
          # Black
          black --check .
          # isort
          isort --check-only .
          # mypy
          mypy .
  security:
    name: Security Scan
    runs-on: ubuntu-latest
    needs: [test, lint]
    permissions:
      security-events: write # needed by codeql-action/upload-sarif
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Run Bandit
        # Emit SARIF so the upload step below can consume it (Bandit's JSON
        # format is not valid SARIF); assumes a Bandit release that ships the
        # sarif extra
        run: |
          python -m pip install --upgrade pip
          pip install "bandit[sarif]"
          bandit -r src/ -c pyproject.toml -f sarif -o bandit-results.sarif
      - name: Upload Bandit results
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: bandit-results.sarif
  build:
    name: Build Package
    runs-on: ubuntu-latest
    needs: [test, lint, security]
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build
      - name: Build package
        run: |
          python -m build
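      # Optional sanity check before publishing the artifact; a minimal sketch
      # that assumes twine is acceptable as a build-time dependency here.
      - name: Check package metadata
        run: |
          pip install twine
          twine check dist/*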
      - name: Upload package artifact
        uses: actions/upload-artifact@v3
        with:
          name: python-package
          path: dist/*
  performance-test:
    name: Performance Test
    runs-on: ubuntu-latest
    needs: build
    services:
      postgres:
        image: postgres:13
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: test_db
        # Expose the service on localhost and wait until it accepts
        # connections before the steps run
        ports:
          - 5432:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y python3-dev python3-pip python3-venv
      - name: Create and activate virtual environment
        run: |
          python -m venv .venv
          echo "$GITHUB_WORKSPACE/.venv/bin" >> $GITHUB_PATH
      - name: Install project dependencies
        run: |
          . .venv/bin/activate
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-dev.txt
          pip install -r tests/performance/requirements-test.txt
      - name: Create required directories
        run: |
          mkdir -p flet-multiplatform-app/results
          mkdir -p flet-multiplatform-app/reports
          mkdir -p flet-multiplatform-app/plots
      - name: Run database migrations
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/test_db
        run: |
          source .venv/bin/activate
          cd flet-multiplatform-app
          alembic upgrade head
      - name: Start application in background
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/test_db
          APP_ENV: test
        run: |
          source .venv/bin/activate
          cd flet-multiplatform-app
          nohup uvicorn src.backend.app.main:app --host 0.0.0.0 --port 8000 > app.log 2>&1 &
          echo $! > app.pid
          echo "Waiting for application to start..."
          sleep 5
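      # A more deterministic readiness probe than the fixed sleep above; a
      # minimal sketch that assumes the app answers HTTP on port 8000 (any
      # response code counts as "up", and the 30-second ceiling is illustrative)
      - name: Wait for application
        run: |
          for i in $(seq 1 30); do
            curl -sS -o /dev/null http://localhost:8000/ && break
            sleep 1
          done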
      - name: Run performance tests
        env:
          # Database configuration
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/test_db
          APP_ENV: test
          BASE_URL: http://localhost:8000
          # Performance test configuration
          PERFORMANCE_TEST_ENABLED: "true"
          PERFORMANCE_TEST_GENERATE_REPORT: "true"
          PERFORMANCE_TEST_SAVE_RESULTS: "true"
          # Test data configuration
          GENERATE_TEST_DATA: "true"
          TEST_DATA_SIZE: "medium"
          # Load test settings
          PERF_LOAD_USERS: 10
          PERF_LOAD_SPAWN_RATE: 2
          PERF_LOAD_DURATION: "30s"
          PERF_LOAD_WARM_UP: 5
          # Stress test settings
          PERF_STRESS_USERS: 20
          PERF_STRESS_SPAWN_RATE: 5
          PERF_STRESS_DURATION: "1m"
          PERF_STRESS_WARM_UP: 5
          # Endurance test settings
          PERF_ENDURANCE_USERS: 5
          PERF_ENDURANCE_SPAWN_RATE: 1
          PERF_ENDURANCE_DURATION: "2m"
          PERF_ENDURANCE_WARM_UP: 10
          # Scalability test settings
          PERF_SCALE_START_USERS: 1
          PERF_SCALE_MAX_USERS: 5
          PERF_SCALE_STEP_SIZE: 1
          PERF_SCALE_STEP_DURATION: "30s"
          PERF_SCALE_WARM_UP: 5
        run: |
          source .venv/bin/activate
          cd flet-multiplatform-app
          echo "🔄 Generating test data..."
          python scripts/generate_performance_test_data.py --size $TEST_DATA_SIZE
          echo "🚀 Starting performance tests..."
          python scripts/run_performance_tests.py
          echo "📊 Generating performance report..."
          python scripts/generate_performance_report.py
          echo "✅ Performance tests completed"
          # Generate coverage for the unit tests bundled with the performance suite
          coverage run --source=. --omit="*/tests/*,*/migrations/*,*/venv/*,*/.tox/*,*/node_modules/*" -m pytest tests/performance/unit -v
          coverage xml -o coverage.xml
          coverage html -d coverage_html_report
      - name: Run pytest performance suites
        run: |
          source .venv/bin/activate
          cd flet-multiplatform-app
          mkdir -p test-results/performance
          # Run load test
          echo "Running load test..."
          pytest tests/performance/test_load.py -v --perf-test --perf-config=tests/performance/config/perf_config.json --junitxml=test-results/performance/load-test-results.xml || echo "Load test completed with some failures"
          # Run stress test
          echo "Running stress test..."
          pytest tests/performance/test_stress.py -v --perf-test --perf-config=tests/performance/config/perf_config.json --junitxml=test-results/performance/stress-test-results.xml || echo "Stress test completed with some failures"
          # Run endurance test (shorter version for CI)
          echo "Running endurance test..."
          pytest tests/performance/test_endurance.py -v --perf-test --perf-config=tests/performance/config/perf_config.json --junitxml=test-results/performance/endurance-test-results.xml -k "not test_sustained_load" || echo "Endurance test completed with some failures"
          # Run scalability test
          echo "Running scalability test..."
          pytest tests/performance/test_scalability.py -v --perf-test --perf-config=tests/performance/config/perf_config.json --junitxml=test-results/performance/scalability-test-results.xml || echo "Scalability test completed with some failures"
      - name: Run performance analysis
        if: always()
        run: |
          source .venv/bin/activate
          cd flet-multiplatform-app
          echo "Running performance analysis..."
          python scripts/analyze_performance.py --test-type all
      - name: Show test results
        if: always()
        run: |
          echo "Test results:"
          ls -la flet-multiplatform-app/results/
          echo -e "\nReports:"
          ls -la flet-multiplatform-app/reports/
      - name: Upload performance test results
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: performance-test-results
          path: |
            flet-multiplatform-app/results/*
            flet-multiplatform-app/reports/*
            flet-multiplatform-app/plots/*
            flet-multiplatform-app/test-results/performance/*.xml
          retention-days: 7
          if-no-files-found: warn
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./flet-multiplatform-app/coverage.xml
          flags: performance
          name: codecov-performance
          fail_ci_if_error: false
      - name: Send Slack notification
        if: always()
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        run: |
          source .venv/bin/activate
          cd flet-multiplatform-app
          python scripts/send_slack_notification.py \
            --results-dir results \
            --repo-url "https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" \
            --run-id "$GITHUB_RUN_ID"
        continue-on-error: true
      - name: Upload application logs
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: app-logs
          path: flet-multiplatform-app/app.log
      - name: Stop application
        if: always()
        run: |
          if [ -f flet-multiplatform-app/app.pid ]; then
            kill $(cat flet-multiplatform-app/app.pid) || true
            rm -f flet-multiplatform-app/app.pid
          fi
  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop'
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Download package artifact
        uses: actions/download-artifact@v3
        with:
          name: python-package
          # Extract into dist/ so the twine invocation below finds the files
          path: dist
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install twine
      - name: Publish to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          twine upload --skip-existing dist/*
  notify:
    name: Notify
    runs-on: ubuntu-latest
    needs: [test, lint, security, build, performance-test]
    if: always()
    steps:
      - name: Send notification
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
          # job.status here would report the notify job's own status; derive
          # the pipeline outcome from the needed jobs instead
          SLACK_TITLE: "CI/CD Pipeline ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}"
          SLACK_MESSAGE: "${{ github.workflow }} workflow run #${{ github.run_number }} has ${{ contains(needs.*.result, 'failure') && 'failed' || 'succeeded' }} for ${{ github.ref }}."
          SLACK_COLOR: ${{ contains(needs.*.result, 'failure') && 'danger' || 'good' }}