Performance Testing
Load testing and performance benchmarking for frontend applications
Performance testing ensures your application can handle load and identifies bottlenecks before they impact users. This page covers both frontend performance testing and API load testing.
Types of Performance Tests
| Type | Purpose | Tools |
|---|---|---|
| Load Testing | Normal expected load | k6, Artillery |
| Stress Testing | Breaking point | k6, Artillery |
| Spike Testing | Sudden traffic surges | k6 |
| Soak Testing | Long-term stability | k6 |
| Frontend Perf | Page speed, vitals | Lighthouse, WebPageTest |
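Soak testing appears in the table but has no dedicated example further down. A minimal k6 sketch (same style as the load tests later on this page; the endpoint and durations are illustrative) holds a moderate load for hours to surface memory leaks and gradual degradation:

```js
// load-tests/soak.js — illustrative soak test sketch
import http from 'k6/http';
import { sleep } from 'k6';

export const options = {
  stages: [
    { duration: '5m', target: 100 }, // Ramp up to a moderate load
    { duration: '4h', target: 100 }, // Hold it for hours to expose leaks and slow degradation
    { duration: '5m', target: 0 },   // Ramp down
  ],
};

export default function () {
  http.get('https://myapp.com/api/data');
  sleep(1);
}
```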
Frontend Performance Testing
Lighthouse CI (Covered in Performance Monitoring)
Lighthouse CI is already covered in detail in /docs/patterns/performance-monitoring/lighthouse-ci.
WebPageTest API
```ts
// scripts/webpagetest.ts
async function runWebPageTest(url: string) {
  const apiKey = process.env.WEBPAGETEST_API_KEY;
  const response = await fetch(
    `https://www.webpagetest.org/runtest.php?url=${encodeURIComponent(url)}&k=${apiKey}&f=json&location=Dulles:Chrome&runs=3&fvonly=1`
  );
  const data = await response.json();
  const testId = data.data.testId;

  // Poll until the test completes (statusCode 200 means results are ready)
  let result;
  do {
    await new Promise(resolve => setTimeout(resolve, 10000));
    const resultResponse = await fetch(
      `https://www.webpagetest.org/jsonResult.php?test=${testId}`
    );
    result = await resultResponse.json();
  } while (result.statusCode !== 200);

  const metrics = result.data.median.firstView;

  // Assert performance budgets
  if (metrics.TTFB > 600) {
    throw new Error(`TTFB too slow: ${metrics.TTFB}ms > 600ms`);
  }
  if (metrics.SpeedIndex > 3000) {
    throw new Error(`Speed Index too slow: ${metrics.SpeedIndex} > 3000`);
  }

  return metrics;
}
```

Load Testing with k6
Installation
```bash
# macOS
brew install k6

# Linux
sudo snap install k6

# Windows
choco install k6
```

Basic Load Test
```js
// load-tests/basic.js
import http from 'k6/http';
import { check, sleep } from 'k6';

export const options = {
  // Ramp up from 0 to 50 users over 30s
  stages: [
    { duration: '30s', target: 50 },
    { duration: '1m', target: 50 },
    { duration: '30s', target: 0 },
  ],
  // Performance thresholds
  thresholds: {
    http_req_duration: ['p(95)<500'], // 95% < 500ms
    http_req_failed: ['rate<0.01'],   // < 1% errors
  },
};

export default function () {
  // Load homepage
  const response = http.get('https://myapp.com');

  check(response, {
    'status is 200': (r) => r.status === 200,
    'response time < 500ms': (r) => r.timings.duration < 500,
  });

  sleep(1);
}
```

API Load Test
```js
// load-tests/api.js
import http from 'k6/http';
import { check, group } from 'k6';

export const options = {
  stages: [
    { duration: '1m', target: 100 }, // Ramp up
    { duration: '3m', target: 100 }, // Stay at 100 users
    { duration: '1m', target: 0 },   // Ramp down
  ],
  thresholds: {
    'http_req_duration{endpoint:products}': ['p(95)<200'],
    'http_req_duration{endpoint:search}': ['p(95)<300'],
    'http_req_duration{endpoint:checkout}': ['p(95)<500'],
  },
};

const BASE_URL = 'https://api.myapp.com';
const headers = {
  'Content-Type': 'application/json',
  'Authorization': 'Bearer token123',
};

export default function () {
  group('Browse Products', () => {
    const res = http.get(`${BASE_URL}/api/products`, { headers, tags: { endpoint: 'products' } });
    check(res, {
      'products loaded': (r) => r.status === 200,
      'has products': (r) => JSON.parse(r.body).products.length > 0,
    });
  });

  group('Search', () => {
    const res = http.get(`${BASE_URL}/api/search?q=laptop`, { headers, tags: { endpoint: 'search' } });
    check(res, {
      'search works': (r) => r.status === 200,
    });
  });

  group('Checkout', () => {
    const payload = JSON.stringify({
      items: [{ id: 1, quantity: 1 }],
      payment: { method: 'card' },
    });
    const res = http.post(`${BASE_URL}/api/checkout`, payload, { headers, tags: { endpoint: 'checkout' } });
    check(res, {
      'checkout succeeds': (r) => r.status === 200,
    });
  });
}
```

Stress Testing
```js
// load-tests/stress.js
import http from 'k6/http';

export const options = {
  stages: [
    { duration: '2m', target: 100 },  // Normal load
    { duration: '5m', target: 100 },  // Sustain
    { duration: '2m', target: 200 },  // Increase
    { duration: '5m', target: 200 },  // Sustain
    { duration: '2m', target: 300 },  // Push further
    { duration: '5m', target: 300 },  // Sustain
    { duration: '2m', target: 400 },  // Break it!
    { duration: '5m', target: 400 },  // See if it survives
    { duration: '10m', target: 0 },   // Recovery
  ],
};

export default function () {
  http.get('https://myapp.com/api/data');
}
```

Spike Testing
```js
// load-tests/spike.js
import http from 'k6/http';

export const options = {
  stages: [
    { duration: '10s', target: 100 },  // Normal
    { duration: '1m', target: 100 },   // Sustain
    { duration: '10s', target: 1400 }, // SPIKE!
    { duration: '3m', target: 1400 },  // Sustain spike
    { duration: '10s', target: 100 },  // Back to normal
    { duration: '3m', target: 100 },   // Recover
    { duration: '10s', target: 0 },    // Down
  ],
};

export default function () {
  // Hit the same endpoint while traffic spikes
  http.get('https://myapp.com/api/data');
}
```

Artillery (Alternative)
Installation
```bash
npm install -g artillery
```

Basic Artillery Test
```yaml
# artillery.yml
config:
  target: 'https://myapp.com'
  phases:
    - duration: 60
      arrivalRate: 10 # 10 users per second
      name: "Warm up"
    - duration: 300
      arrivalRate: 50 # 50 users per second
      name: "Load test"
  plugins:
    expect: {}

scenarios:
  - name: "Browse and purchase"
    flow:
      - get:
          url: "/"
          expect:
            - statusCode: 200
            - contentType: text/html
      - get:
          url: "/api/products"
          expect:
            - statusCode: 200
            - hasProperty: products
      - post:
          url: "/api/cart"
          json:
            productId: 1
            quantity: 1
          expect:
            - statusCode: 200
      - post:
          url: "/api/checkout"
          json:
            payment: "card"
          expect:
            - statusCode: 200
            - hasProperty: orderId
```

```bash
# Run test
artillery run artillery.yml

# Generate report
artillery run --output report.json artillery.yml
artillery report report.json
```

Browser Performance Testing
Playwright Performance
```ts
// tests/performance/page-load.spec.ts
import { test, expect } from '@playwright/test';

test('homepage loads within budget', async ({ page }) => {
  const startTime = Date.now();
  await page.goto('https://myapp.com');
  await page.waitForLoadState('networkidle');
  const loadTime = Date.now() - startTime;

  // Assert performance budget
  expect(loadTime).toBeLessThan(3000);

  // Get navigation timing metrics
  const metrics = await page.evaluate(() => {
    const navigation = performance.getEntriesByType('navigation')[0] as PerformanceNavigationTiming;
    return {
      ttfb: navigation.responseStart - navigation.requestStart,
      domContentLoaded: navigation.domContentLoadedEventEnd - navigation.fetchStart,
      loadComplete: navigation.loadEventEnd - navigation.fetchStart,
    };
  });

  console.log('Performance Metrics:', metrics);
  expect(metrics.ttfb).toBeLessThan(600);
  expect(metrics.domContentLoaded).toBeLessThan(2000);
});

test('measures Core Web Vitals', async ({ page }) => {
  await page.goto('https://myapp.com');

  const vitals = await page.evaluate(() => {
    return new Promise<{ lcp: number; cls: number }>((resolve) => {
      let lcp = 0;
      let cls = 0;

      // LCP: buffered so entries emitted before the observer registered are still delivered
      new PerformanceObserver((list) => {
        const entries = list.getEntries();
        lcp = entries[entries.length - 1].startTime;
      }).observe({ type: 'largest-contentful-paint', buffered: true });

      // CLS: sum layout shifts not caused by recent user input
      new PerformanceObserver((list) => {
        for (const entry of list.getEntries()) {
          if (!(entry as any).hadRecentInput) {
            cls += (entry as any).value;
          }
        }
      }).observe({ type: 'layout-shift', buffered: true });

      // Give the page a few seconds to settle before reporting
      setTimeout(() => resolve({ lcp, cls }), 5000);
    });
  });

  expect(vitals.lcp).toBeLessThan(2500);
  expect(vitals.cls).toBeLessThan(0.1);
});
```

CI/CD Integration
```yaml
# .github/workflows/load-test.yml
name: Load Tests

on:
  schedule:
    - cron: '0 2 * * *' # Daily at 2 AM
  workflow_dispatch: # Manual trigger

jobs:
  load-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install k6
        run: |
          sudo gpg -k
          sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69
          echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list
          sudo apt-get update
          sudo apt-get install k6

      - name: Run load test
        # Export the end-of-test summary so the artifact upload below has a file to pick up
        run: k6 run --summary-export=summary.json load-tests/api.js

      - name: Upload results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: load-test-results
          path: summary.json
```

Monitoring Integration
```js
// Send k6 metrics to DataDog from handleSummary()
import http from 'k6/http';
import { Trend } from 'k6/metrics';
import { textSummary } from 'https://jslib.k6.io/k6-summary/0.0.1/index.js';

// Example of declaring a custom metric (recorded elsewhere in the test script)
const customMetric = new Trend('custom_metric');

export function handleSummary(data) {
  // Push the average request duration to the DataDog metrics API
  http.post('https://api.datadoghq.com/api/v1/series', JSON.stringify({
    series: [
      {
        metric: 'k6.http_req_duration',
        points: [[Date.now() / 1000, data.metrics.http_req_duration.values.avg]],
        tags: ['env:production'],
      },
    ],
  }), {
    headers: {
      'DD-API-KEY': __ENV.DATADOG_API_KEY,
      'Content-Type': 'application/json',
    },
  });

  return {
    stdout: textSummary(data, { indent: ' ', enableColors: true }),
    'summary.json': JSON.stringify(data),
  };
}
```

Best Practices
- Test in production-like environment
- Start with realistic scenarios
- Gradually increase load
- Monitor server resources (CPU, memory, DB)
- Test from multiple regions
- Run tests regularly (nightly, weekly)
- Set performance budgets
- Alert on regressions
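One way to encode budgets and alert on regressions is to let k6 abort the run (and exit non-zero in CI) as soon as a threshold is blown. A minimal sketch, reusing the example endpoints above; the file name and values are illustrative:

```js
// load-tests/budgets.js — budgets expressed as k6 thresholds (illustrative values)
import http from 'k6/http';

export const options = {
  vus: 50,
  duration: '5m',
  thresholds: {
    // Abort (and fail the CI job) as soon as the p95 budget is exceeded,
    // after an initial 30s grace period for warm-up
    http_req_duration: [{ threshold: 'p(95)<500', abortOnFail: true, delayAbortEval: '30s' }],
    // Keep the error rate under 1%
    http_req_failed: ['rate<0.01'],
  },
};

export default function () {
  http.get('https://myapp.com/api/products');
}
```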
Performance Budgets
| Metric | Budget | Critical |
|---|---|---|
| TTFB | < 600ms | < 800ms |
| FCP | < 1.8s | < 3s |
| LCP | < 2.5s | < 4s |
| API p95 | < 200ms | < 500ms |
| Error Rate | < 0.1% | < 1% |
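To keep these numbers from drifting between tools, one option is a single shared constants module that both the k6 thresholds and the browser tests read from. A hypothetical sketch (module name, field names, and the helper are assumptions, with values taken from the table above):

```js
// perf/budgets.js — hypothetical shared budget constants
export const BUDGETS = {
  ttfbMs: 600,
  fcpMs: 1800,
  lcpMs: 2500,
  apiP95Ms: 200,
  errorRate: 0.001,
};

// Example helper: derive k6 thresholds from the shared budgets
export function k6Thresholds() {
  return {
    http_req_duration: [`p(95)<${BUDGETS.apiP95Ms}`],
    http_req_failed: [`rate<${BUDGETS.errorRate}`],
  };
}
```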
Common Pitfalls
❌ Testing only from one location
✅ Test from user regions
❌ Unrealistic scenarios
✅ Model actual user behavior
❌ Testing only happy paths
✅ Include error scenarios
❌ Ignoring warm-up
✅ Ramp up gradually
❌ No baseline measurements
✅ Track trends over time
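For the missing-baseline pitfall, a lightweight approach is to compare each run against a stored baseline and fail on regressions beyond a tolerance. A sketch, assuming you export `summary.json` as in the CI job above and keep a `baseline.json` alongside it (file names, tolerance, and the exact summary format vary by k6 version):

```js
// scripts/compare-baseline.mjs — hypothetical baseline check
import { readFileSync } from 'node:fs';

const TOLERANCE = 1.10; // allow 10% drift before failing

const current = JSON.parse(readFileSync('summary.json', 'utf8'));
const baseline = JSON.parse(readFileSync('baseline.json', 'utf8'));

// k6's --summary-export writes percentiles under metrics.http_req_duration
// (key layout may differ across k6 versions)
const currentP95 = current.metrics.http_req_duration['p(95)'];
const baselineP95 = baseline.metrics.http_req_duration['p(95)'];

if (currentP95 > baselineP95 * TOLERANCE) {
  console.error(`p95 regression: ${currentP95.toFixed(1)}ms vs baseline ${baselineP95.toFixed(1)}ms`);
  process.exit(1);
}

console.log(`p95 OK: ${currentP95.toFixed(1)}ms (baseline ${baselineP95.toFixed(1)}ms)`);
```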
Performance testing catches scalability issues before users do—make it part of your deployment pipeline.