Testing Guide
Comprehensive testing guidelines for PVTools development, covering unit tests, integration tests, and validation procedures.
Testing Philosophy
PVTools follows a test-driven development approach with multiple layers of testing:
- Unit Tests: Test individual functions and classes in isolation
- Integration Tests: Test module interactions and data flow
- System Tests: Test complete optimization workflows
- Performance Tests: Validate optimization speed and resource usage
- Validation Tests: Verify results against known benchmarks
Test Structure
tests/
├── __init__.py
├── conftest.py # Pytest configuration and fixtures
├── unit/ # Unit tests
│ ├── test_technical.py # Technical calculations
│ ├── test_finance.py # Financial functions
│ ├── test_bess.py # Battery modeling
│ ├── test_processes.py # Data processing
│ └── test_optimisation.py # Core optimization logic
├── integration/ # Integration tests
│ ├── test_api_integration.py # External API integration
│ ├── test_optimization_flow.py # End-to-end optimization
│ └── test_data_pipeline.py # Data processing pipeline
├── performance/ # Performance tests
│ ├── test_optimization_speed.py
│ └── test_memory_usage.py
├── validation/ # Validation against benchmarks
│ ├── test_known_systems.py # Validate against real systems
│ └── test_regression.py # Prevent regression
└── fixtures/ # Test data and configurations
├── sample_configs.yaml
├── test_data.json
└── mock_responses/
Test Configuration
conftest.py Setup
import pytest
import yaml
import json
from pathlib import Path
from unittest.mock import Mock, patch
import pandas as pd
import numpy as np
from pv_tools.optimiser import main, optimise
from pv_tools.ProductionModelling import modelSetup
from pv_tools.Utilities.BESS import BatteryModel
@pytest.fixture
def sample_config():
"""Load sample configuration for testing."""
config_path = Path(__file__).parent / 'fixtures' / 'sample_configs.yaml'
with open(config_path, 'r') as f:
return yaml.safe_load(f)
@pytest.fixture
def mock_pvgis_response():
"""Mock PVGIS API response."""
return {
'outputs': {
'monthly': {
'fixed': [
{'month': 1, 'E_m': 450.2, 'H(i)_m': 145.8},
{'month': 2, 'E_m': 425.6, 'H(i)_m': 138.2},
# ... more months
]
},
'hourly': [
{'time': '20200101:0000', 'P': 0.0, 'G(i)': 0, 'H_sun': 0},
{'time': '20200101:0100', 'P': 0.0, 'G(i)': 0, 'H_sun': 0},
# ... more hours
]
}
}
@pytest.fixture
def sample_consumption_profile():
"""Generate realistic consumption profile for testing."""
np.random.seed(42) # Reproducible random data
# Base load with daily pattern
hours = range(24)
base_pattern = [
0.3, 0.25, 0.2, 0.15, 0.15, 0.2, # 00:00-05:00 (night)
0.4, 0.6, 0.8, 0.9, 0.85, 0.8, # 06:00-11:00 (morning)
0.75, 0.7, 0.7, 0.75, 0.85, 1.0, # 12:00-17:00 (afternoon)
1.0, 0.9, 0.8, 0.7, 0.6, 0.4 # 18:00-23:00 (evening)
]
# Add random variation
variation = np.random.normal(1.0, 0.1, 24)
consumption = [base * var * 10 for base, var in zip(base_pattern, variation)]
return consumption
@pytest.fixture
def battery_model():
"""Create standard battery model for testing."""
return BatteryModel(
capacity_kwh=10.0,
power_rating_kw=5.0,
efficiency=0.90,
min_soc=0.20,
max_soc=1.0
)
@pytest.fixture
def mock_external_apis():
"""Mock all external API calls."""
with patch('pv_tools.Utilities.Processes.getHistoricalData') as mock_pvgis, \
patch('pv_tools.Utilities.Processes.getCoordinates') as mock_geocode:
mock_pvgis.return_value = {
'outputs': {'monthly': {'fixed': [
{'month': i, 'E_m': 400 + i*10} for i in range(1, 13)
]}}
}
mock_geocode.return_value = (3.1390, 101.6869) # Kuala Lumpur
yield mock_pvgis, mock_geocode
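The mock_responses/ directory listed in the test tree can hold captured API payloads for offline testing. A minimal loader fixture for conftest.py, assuming the payloads are stored as JSON (the file name hourly_kl.json is illustrative, not an existing fixture):
@pytest.fixture
def recorded_pvgis_response():
    """Load a captured PVGIS payload from fixtures/mock_responses/ (file name illustrative)."""
    response_path = Path(__file__).parent / 'fixtures' / 'mock_responses' / 'hourly_kl.json'
    with open(response_path, 'r') as f:
        return json.load(f)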
Unit Testing
Testing Technical Calculations
# tests/unit/test_technical.py
import pytest
from pv_tools.Utilities.Technical import calculate_bill, size_inverters, solve_pre_surcharge_bill
class TestBillCalculation:
"""Test TNB bill calculation functions."""
def test_domestic_tariff_calculation(self, sample_config):
"""Test domestic (B) tariff calculation."""
# Test normal consumption
bill = calculate_bill(sample_config, "B", 350.0)
assert 140 < bill < 160 # Expected range for 350 kWh
# Test tiered structure
low_bill = calculate_bill(sample_config, "B", 100.0)
high_bill = calculate_bill(sample_config, "B", 500.0)
assert low_bill < bill < high_bill
def test_commercial_tariff_calculation(self, sample_config):
"""Test commercial (C1) tariff with demand charges."""
bill = calculate_bill(sample_config, "C1", 2000.0, peak_power_demand=15.0)
# Should include both energy and demand charges
energy_only_bill = calculate_bill(sample_config, "C1", 2000.0, peak_power_demand=0.0)
assert bill > energy_only_bill
def test_negative_consumption_export(self, sample_config):
"""Test handling of negative consumption (solar export)."""
# Mock SMP export rate
sample_config["standard_parameters"]["SMP_export"]["rate"] = 0.31
export_bill = calculate_bill(sample_config, "B", -100.0)
assert export_bill == -31.0 # 100 kWh * 0.31 RM/kWh
@pytest.mark.parametrize("consumption,expected_range", [
(0, (3, 5)), # Service charge only
(200, (70, 90)), # Low tier
(600, (200, 250)), # High tier
(1000, (350, 450)) # Very high consumption
])
def test_tariff_ranges(self, sample_config, consumption, expected_range):
"""Test bill calculation for various consumption levels."""
bill = calculate_bill(sample_config, "B", consumption)
assert expected_range[0] <= bill <= expected_range[1]
class TestInverterSizing:
"""Test inverter sizing algorithms."""
def test_single_inverter_sizing(self, sample_config):
"""Test sizing with single inverter type."""
inverter_data = {
"models": [
{"model": "Test-5kW", "capacity_kw": 5.0, "cost": 3000}
]
}
result = size_inverters(15.0, inverter_data)
assert result["quantity"] == 3 # 15 kWp / 5 kW = 3 inverters
assert result["total_capacity"] == 15.0
assert result["total_cost"] == 9000 # 3 * 3000
def test_multiple_inverter_options(self, sample_config):
"""Test optimization across multiple inverter options."""
inverter_data = {
"models": [
{"model": "Small-3kW", "capacity_kw": 3.0, "cost": 2000},
{"model": "Large-10kW", "capacity_kw": 10.0, "cost": 5000}
]
}
result = size_inverters(15.0, inverter_data)
# Should select cost-optimal configuration
assert result["feasible"] is True
assert result["total_cost"] > 0
class TestReverseBillCalculation:
"""Test reverse bill calculation for load estimation."""
def test_basic_reverse_calculation(self, sample_config):
"""Test reversing simple tariff calculation."""
target_bill = 150.0
consumption = solve_pre_surcharge_bill(
sample_config, "B", target_bill, 10.0, 0.5
)
# Verify by calculating bill forward
calculated_bill = calculate_bill(sample_config, "B", consumption, 10.0, 0.5)
assert abs(calculated_bill - target_bill) < 5.0 # Within 5 RM tolerance
def test_reverse_calculation_convergence(self, sample_config):
"""Test convergence for various target bills."""
target_bills = [50, 100, 200, 500, 1000]
for target in target_bills:
consumption = solve_pre_surcharge_bill(sample_config, "B", target, 20.0, 0.6)
verification_bill = calculate_bill(sample_config, "B", consumption, 20.0, 0.6)
error_percentage = abs(verification_bill - target) / target
assert error_percentage < 0.05 # Within 5% error
Testing Financial Functions
# tests/unit/test_finance.py
import pytest
from pv_tools.Utilities.Finance import calculate_npv, calculate_irr, calculate_loan_payment
class TestNPVCalculations:
"""Test Net Present Value calculations."""
def test_simple_npv(self):
"""Test basic NPV calculation."""
cash_flows = [-1000, 300, 300, 300, 300, 300] # 5-year investment
npv = calculate_npv(cash_flows, 0.10)
# Expected NPV ≈ 137.24
assert 130 < npv < 145
def test_zero_discount_rate(self):
"""Test NPV with zero discount rate."""
cash_flows = [-1000, 200, 200, 200, 200, 200]
npv = calculate_npv(cash_flows, 0.0)
# Should equal sum of cash flows
assert npv == sum(cash_flows)
def test_negative_npv(self):
"""Test calculation with negative NPV."""
cash_flows = [-1000, 100, 100, 100] # Poor investment
npv = calculate_npv(cash_flows, 0.15)
assert npv < 0 # Should be negative
@pytest.mark.parametrize("discount_rate", [0.05, 0.08, 0.12, 0.20])
def test_npv_discount_rate_sensitivity(self, discount_rate):
"""Test NPV sensitivity to discount rate."""
cash_flows = [-1000, 300, 300, 300, 300, 300]
npv = calculate_npv(cash_flows, discount_rate)
# Higher discount rate should give lower NPV
if discount_rate > 0.10:
npv_lower_rate = calculate_npv(cash_flows, 0.10)
assert npv < npv_lower_rate
class TestIRRCalculations:
"""Test Internal Rate of Return calculations."""
def test_simple_irr(self):
"""Test basic IRR calculation."""
cash_flows = [-1000, 300, 300, 300, 300, 300]
irr = calculate_irr(cash_flows)
# Expected IRR ≈ 15.24%
assert 0.14 < irr < 0.17
def test_irr_verification(self):
"""Verify IRR by checking NPV equals zero."""
cash_flows = [-1000, 400, 400, 400]
irr = calculate_irr(cash_flows)
# NPV at IRR should be approximately zero
npv_at_irr = calculate_npv(cash_flows, irr)
assert abs(npv_at_irr) < 1e-6
def test_no_positive_cash_flows(self):
"""Test IRR with no positive cash flows."""
cash_flows = [-1000, -200, -300]
with pytest.raises(ValueError):
calculate_irr(cash_flows)
class TestLoanCalculations:
"""Test loan payment calculations."""
def test_loan_payment_calculation(self):
"""Test monthly payment calculation."""
# RM 100,000 loan, 5% annual rate, 20 years
payment = calculate_loan_payment(100000, 0.05, 20)
# Expected payment ≈ RM 659.96
assert 650 < payment < 670
def test_zero_interest_loan(self):
"""Test interest-free loan payment."""
payment = calculate_loan_payment(120000, 0.0, 10)
# Should equal principal / number of payments
expected = 120000 / (10 * 12)
assert abs(payment - expected) < 0.01
def test_loan_payment_scaling(self):
"""Test payment scaling with principal amount."""
payment_100k = calculate_loan_payment(100000, 0.06, 15)
payment_200k = calculate_loan_payment(200000, 0.06, 15)
# Payment should scale linearly with principal
assert abs(payment_200k - 2 * payment_100k) < 1.0
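The RM 659.96 figure in test_loan_payment_calculation comes from the standard annuity (amortisation) formula. A quick stand-alone check of that arithmetic, independent of calculate_loan_payment:
# Sanity check: RM 100,000 at 5% p.a. over 20 years, paid monthly
principal, annual_rate, years = 100_000, 0.05, 20
r, n = annual_rate / 12, years * 12              # monthly rate, number of payments
payment = principal * r / (1 - (1 + r) ** -n)    # annuity formula
print(round(payment, 2))                         # ≈ 659.96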
Testing Battery Models
# tests/unit/test_bess.py
import pytest
from pv_tools.Utilities.BESS import BatteryModel
class TestBatteryModel:
"""Test battery energy storage system modeling."""
def test_battery_initialization(self):
"""Test battery model initialization."""
battery = BatteryModel(
capacity_kwh=10.0,
power_rating_kw=5.0,
efficiency=0.90,
min_soc=0.20,
max_soc=1.0
)
assert battery.capacity == 10.0
assert battery.power_rating == 5.0
assert battery.soc == 0.5 # Default initial SOC
assert battery.total_cycles == 0.0
def test_battery_charging(self, battery_model):
"""Test battery charging process."""
initial_soc = battery_model.soc
# Charge for 2 hours at 3 kW
energy_stored = battery_model.charge(3.0, 2.0)
# Should store 6 kWh * 0.90 efficiency = 5.4 kWh
assert abs(energy_stored - 5.4) < 0.1
# SOC should increase
assert battery_model.soc > initial_soc
def test_battery_discharging(self, battery_model):
"""Test battery discharging process."""
# First charge the battery
battery_model.charge(5.0, 1.0) # Charge to higher SOC
initial_soc = battery_model.soc
# Discharge for 1 hour at 2 kW
energy_delivered = battery_model.discharge(2.0, 1.0)
# Should deliver 2 kWh * 0.90 efficiency = 1.8 kWh
assert abs(energy_delivered - 1.8) < 0.1
# SOC should decrease
assert battery_model.soc < initial_soc
def test_soc_limits(self, battery_model):
"""Test SOC limit enforcement."""
# Try to overcharge
battery_model.soc = 0.95
energy_stored = battery_model.charge(5.0, 2.0) # Attempt 10 kWh
# Should be limited by max SOC
assert battery_model.soc <= battery_model.max_soc
assert energy_stored < 10.0 * battery_model.efficiency
# Try to over-discharge
battery_model.soc = 0.25
energy_delivered = battery_model.discharge(5.0, 2.0) # Attempt 10 kWh
# Should be limited by min SOC
assert battery_model.soc >= battery_model.min_soc
def test_power_rating_limits(self, battery_model):
"""Test power rating limit enforcement."""
# Attempt to charge/discharge above power rating
energy_stored = battery_model.charge(10.0, 1.0) # Above 5 kW rating
# Should be limited to power rating * efficiency
max_expected = battery_model.power_rating * battery_model.efficiency
assert energy_stored <= max_expected + 0.1
def test_cycle_counting(self, battery_model):
"""Test cycle life tracking."""
initial_cycles = battery_model.total_cycles
# Perform partial cycle (50% discharge)
battery_model.discharge(2.5, 2.0) # 5 kWh = 50% of 10 kWh capacity
# Should increment cycle count by 0.5
cycle_increase = battery_model.total_cycles - initial_cycles
assert abs(cycle_increase - 0.5) < 0.1
def test_degradation_calculation(self, battery_model):
"""Test battery degradation modeling."""
# New battery should have no degradation
assert battery_model.get_degradation_factor() == 1.0
# Simulate aging
battery_model.total_cycles = battery_model.cycle_life * 0.5 # 50% of life
degradation_factor = battery_model.get_degradation_factor()
# Should have some degradation but > 80%
assert 0.8 < degradation_factor < 1.0
# End of life
battery_model.total_cycles = battery_model.cycle_life
assert battery_model.get_degradation_factor() == 0.8
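The degradation tests above assume a linear capacity fade from 100% when new to 80% at the end of cycle life. A sketch of that assumed curve (not necessarily the exact formula inside BatteryModel):
def assumed_degradation_factor(total_cycles: float, cycle_life: float) -> float:
    """Linear fade from 1.0 (new) to 0.8 (end of cycle life), as the tests expect."""
    fraction_used = min(total_cycles / cycle_life, 1.0)
    return 1.0 - 0.2 * fraction_used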
Integration Testing
Testing Complete Optimization Flow
# tests/integration/test_optimization_flow.py
import pytest
from unittest.mock import patch
from pv_tools.optimiser import main, optimise
class TestOptimizationFlow:
"""Test complete optimization workflow."""
@patch('pv_tools.Utilities.Processes.getHistoricalData')
@patch('pv_tools.Utilities.Processes.getCoordinates')
def test_complete_tokenization_ppa_flow(self, mock_geocode, mock_pvgis, sample_config):
"""Test complete tokenization PPA optimization."""
# Setup mocks
mock_geocode.return_value = (3.1390, 101.6869)
mock_pvgis.return_value = {
'outputs': {'monthly': {'fixed': [
{'month': i, 'E_m': 400 + i*10} for i in range(1, 13)
]}}
}
# Configure for tokenization PPA
sample_config['parameters']['financing'] = 'Tokenization'
sample_config['parameters']['contract_type'] = 'PPA'
# Run optimization
consumption_profile = main(sample_config)
results, updated_config = optimise(sample_config, consumption_profile)
# Validate results structure
assert 'system_size' in results
assert 'customer_savings' in results
assert 'investor_returns' in results
assert 'ESG' in results
# Validate result ranges
assert 0 < results['system_size'] < 50 # Reasonable system size
assert results['customer_savings'] >= 0 # Non-negative savings
assert 'annual_tonnes_of_CO2_reduced' in results['ESG']
def test_term_loan_optimization(self, mock_external_apis, sample_config):
"""Test term loan optimization flow."""
sample_config['parameters']['financing'] = 'TermLoan'
sample_config['parameters']['contract_type'] = 'DirectPurchase'
consumption_profile = main(sample_config)
results, updated_config = optimise(sample_config, consumption_profile)
# Term loan specific validations
assert 'loan_amount' in results or 'financing_details' in results
assert results['customer_roi'] > 0 # Should have positive ROI
def test_optimization_with_battery(self, mock_external_apis, sample_config):
"""Test optimization with battery storage."""
# Enable battery optimization
sample_config['parameters']['include_battery'] = True
sample_config['parameters']['max_battery_size'] = 20.0
consumption_profile = main(sample_config)
results, updated_config = optimise(sample_config, consumption_profile)
if 'battery_size' in results:
assert 0 <= results['battery_size'] <= 20.0
assert 'battery_savings' in results or 'peak_shaving_value' in results
class TestDataPipeline:
"""Test data processing pipeline."""
def test_configuration_loading_and_validation(self, sample_config):
"""Test configuration loading and enhancement."""
from pv_tools.ProductionModelling import modelSetup
enhanced_config = modelSetup(sample_config)
# Check that all required data is loaded
assert 'grid_tariffs' in enhanced_config
assert 'prices' in enhanced_config
assert 'ESG' in enhanced_config
assert 'latitude' in enhanced_config
assert 'longitude' in enhanced_config
@patch('pv_tools.Utilities.Processes.getHistoricalData')
def test_production_data_processing(self, mock_pvgis, sample_config):
"""Test solar production data processing."""
from pv_tools.ProductionModelling import modelSetup, historical_monthly_avg
# Mock PVGIS response
mock_pvgis.return_value = {
'outputs': {'monthly': {'fixed': [
{'month': i, 'E_m': 350 + i*25} for i in range(1, 13)
]}}
}
enhanced_config = modelSetup(sample_config)
monthly_data = historical_monthly_avg(enhanced_config)
# Validate data structure
assert len(monthly_data) == 12
assert all(col in monthly_data.columns for col in ['month', 'mean', 'low_bound', 'upp_bound'])
assert all(monthly_data['mean'] > 0)
assert all(monthly_data['low_bound'] <= monthly_data['mean'])
assert all(monthly_data['mean'] <= monthly_data['upp_bound'])
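When mock_external_apis is in play, it can also be worth asserting that the optimization actually reached the mocked services rather than a cached path. A small additional integration test as a sketch; `called` is the standard unittest.mock attribute:
# Could sit alongside the tests in tests/integration/test_optimization_flow.py
def test_external_services_are_called(mock_external_apis, sample_config):
    """Check that the pipeline goes through the (mocked) PVGIS and geocoding calls."""
    mock_pvgis, mock_geocode = mock_external_apis
    consumption_profile = main(sample_config)
    optimise(sample_config, consumption_profile)
    assert mock_pvgis.called      # PVGIS history was requested
    assert mock_geocode.called    # the site location was geocoded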
Performance Testing
Optimization Speed Tests
# tests/performance/test_optimization_speed.py
import pytest
import time
from pv_tools.optimiser import main, optimise
class TestOptimizationPerformance:
"""Test optimization performance and resource usage."""
def test_optimization_speed(self, mock_external_apis, sample_config):
"""Test that optimization completes within reasonable time."""
start_time = time.time()
consumption_profile = main(sample_config)
results, updated_config = optimise(sample_config, consumption_profile)
elapsed_time = time.time() - start_time
# Optimization should complete within 30 seconds
assert elapsed_time < 30.0, f"Optimization took {elapsed_time:.2f} seconds"
def test_memory_usage(self, mock_external_apis, sample_config):
"""Test memory usage during optimization."""
import psutil
import os
process = psutil.Process(os.getpid())
initial_memory = process.memory_info().rss / 1024 / 1024 # MB
consumption_profile = main(sample_config)
results, updated_config = optimise(sample_config, consumption_profile)
final_memory = process.memory_info().rss / 1024 / 1024 # MB
memory_increase = final_memory - initial_memory
# Memory increase should be reasonable (< 100 MB)
assert memory_increase < 100, f"Memory increased by {memory_increase:.1f} MB"
@pytest.mark.parametrize("system_size_limit", [10, 50, 100, 500])
def test_scaling_with_system_size(self, mock_external_apis, sample_config, system_size_limit):
"""Test optimization scaling with different system size limits."""
sample_config['max_system_size'] = system_size_limit
start_time = time.time()
consumption_profile = main(sample_config)
results, updated_config = optimise(sample_config, consumption_profile)
elapsed_time = time.time() - start_time
# Time should scale reasonably with problem size
assert elapsed_time < 60.0 # Maximum 1 minute for any size
Validation Testing
Known System Validation
# tests/validation/test_known_systems.py
import pytest
class TestKnownSystemValidation:
"""Validate optimization results against known real-world systems."""
def test_residential_system_benchmark(self, mock_external_apis):
"""Test against known residential system performance."""
# Known system: 5 kWp residential in Kuala Lumpur
config = {
'parameters': {
'location': {'latitude': 3.1390, 'longitude': 101.6869},
'tariff_category': 'B',
'monthly_bill': 180.0,
'peak_power_demand': 8.0,
'financing': 'Tokenization',
'contract_type': 'DirectPurchase',
# ... other parameters
}
}
from pv_tools.optimiser import main, optimise
consumption = main(config)
results, _ = optimise(config, consumption)
# Validate against known benchmarks
assert 4.0 <= results['system_size'] <= 7.0 # Reasonable size range
assert results['customer_roi'] > 5.0 # Should be profitable
assert 2 <= results['ESG']['annual_tonnes_of_CO2_reduced'] <= 10 # a ~5 kWp system avoids a few tonnes of CO2 per year
def test_commercial_system_benchmark(self, mock_external_apis):
"""Test against known commercial system performance."""
config = {
'parameters': {
'location': {'latitude': 3.0000, 'longitude': 101.5000},
'tariff_category': 'C1',
'monthly_bill': 2500.0,
'peak_power_demand': 45.0,
'financing': 'TermLoan',
'contract_type': 'PPA',
# ... other parameters
}
}
from pv_tools.optimiser import main, optimise
consumption = main(config)
results, _ = optimise(config, consumption)
# Commercial system validations
assert 20.0 <= results['system_size'] <= 100.0
assert results['customer_savings'] > 10000 # Significant annual savings
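tests/validation/test_regression.py from the test tree is not shown above; a common pattern is to compare fresh results against a stored baseline and fail on drift. A sketch, where the baseline file name and the 5% tolerance are assumptions:
# tests/validation/test_regression.py (sketch)
import json
from pathlib import Path
from pv_tools.optimiser import main, optimise
BASELINE_PATH = Path(__file__).parent.parent / 'fixtures' / 'baseline_results.json'
def test_results_match_baseline(mock_external_apis, sample_config):
    """Fail if key optimization outputs drift more than 5% from the stored baseline."""
    consumption_profile = main(sample_config)
    results, _ = optimise(sample_config, consumption_profile)
    baseline = json.loads(BASELINE_PATH.read_text())
    for key in ('system_size', 'customer_savings'):
        assert abs(results[key] - baseline[key]) <= 0.05 * abs(baseline[key])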
Running Tests
Basic Test Execution
# Run all tests
pytest
# Run with coverage
pytest --cov=pv_tools --cov-report=html
# Run specific test category
pytest tests/unit/
pytest tests/integration/
pytest tests/performance/
# Run specific test file
pytest tests/unit/test_technical.py -v
# Run specific test function
pytest tests/unit/test_technical.py::TestBillCalculation::test_domestic_tariff_calculation -v
# Run tests with specific markers
pytest -m "not slow" # Skip slow tests
pytest -m "integration" # Run only integration tests
Test Configuration
pytest.ini
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
markers =
slow: marks tests as slow (deselect with '-m "not slow"')
integration: marks tests as integration tests
performance: marks tests as performance tests
validation: marks tests as validation tests
addopts =
--strict-markers
--disable-warnings
--tb=short
filterwarnings =
ignore::UserWarning
ignore::DeprecationWarning
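The markers declared above only take effect once tests are tagged with them. One way to tag a whole module is a module-level pytestmark, shown here for the performance tests:
# Top of tests/performance/test_optimization_speed.py
import pytest
# Every test in this module is selected by `pytest -m performance`
# and skipped by `pytest -m "not slow"`.
pytestmark = [pytest.mark.performance, pytest.mark.slow]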
Continuous Integration
GitHub Actions Configuration
# .github/workflows/test.yml
name: Test Suite
on:
push:
branches: [main, develop]
pull_request:
branches: [main]
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11"]  # quoted so YAML does not read 3.10 as 3.1
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install pytest pytest-cov
- name: Run tests
run: |
pytest --cov=pv_tools --cov-report=xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
file: ./coverage.xml
fail_ci_if_error: true
Test Data Management
Fixtures and Test Data
Create realistic test data that represents actual Malaysian solar market conditions:
# tests/fixtures/create_test_data.py
import json
import yaml
def create_sample_configs():
"""Create realistic sample configurations for testing."""
configs = {
'residential_basic': {
'parameters': {
'location': {'latitude': 3.1390, 'longitude': 101.6869},
'tariff_category': 'B',
'monthly_bill': 150.0,
'peak_power_demand': 6.0,
'scheme': 'NOVA',
'financing': 'Tokenization',
'contract_type': 'DirectPurchase'
}
},
'commercial_ppa': {
'parameters': {
'location': {'latitude': 3.0000, 'longitude': 101.5000},
'tariff_category': 'C1',
'monthly_bill': 3500.0,
'peak_power_demand': 65.0,
'scheme': 'NOVA',
'financing': 'TermLoan',
'contract_type': 'PPA'
}
}
}
return configs
if __name__ == "__main__":
configs = create_sample_configs()
with open('sample_configs.yaml', 'w') as f:
yaml.dump(configs, f, default_flow_style=False)
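Because sample_configs.yaml holds several named configurations, the conftest.py fixtures can be parametrized over them so each test exercises both market segments. A sketch (the fixture name named_config is new here; the keys come from the script above):
import pytest
import yaml
from pathlib import Path
CONFIG_PATH = Path(__file__).parent / 'fixtures' / 'sample_configs.yaml'
@pytest.fixture(params=['residential_basic', 'commercial_ppa'])
def named_config(request):
    """Yield each named configuration in turn."""
    with open(CONFIG_PATH, 'r') as f:
        configs = yaml.safe_load(f)
    return configs[request.param]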
This comprehensive testing guide ensures robust validation of PVTools functionality, performance, and reliability across different scenarios and configurations.