Overview
Nginx rate limiting uses a leaky bucket algorithm. Requests that exceed the configured rate are queued up to the burst size (and delayed, unless nodelay is set); once the burst allowance is exhausted, Nginx rejects the request — with 503 by default, or 429 Too Many Requests when limit_req_status 429 is configured.
nginx.conf (http block)
http {
# Zone for anonymous API callers - keyed by IP
limit_req_zone $binary_remote_addr zone=api_anon:10m rate=10r/m;
# Key for authenticated users: the X-User-Id header set by upstream auth
# middleware. BUG FIX: an empty limit key disables rate limiting, so a
# client that simply omits the header would bypass the limit entirely.
# Fall back to the client IP when the header is missing or empty.
map $http_x_user_id $api_auth_key {
"" $binary_remote_addr;
default $http_x_user_id;
}
limit_req_zone $api_auth_key zone=api_auth:20m rate=120r/m;
# Zone for login endpoints (brute-force protection)
limit_req_zone $binary_remote_addr zone=login:10m rate=5r/m;
limit_req_status 429; # default is 503 - 429 is semantically correct
limit_conn_status 429;
}
sites-available/api.conf
server {
# NOTE(review): the "http2" listen parameter is deprecated since nginx 1.25.1
# in favor of a separate "http2 on;" directive - confirm target nginx version.
# ssl_certificate/ssl_certificate_key are presumably set elsewhere (not shown).
listen 443 ssl http2;
server_name api.example.com;
# ── Auth endpoints (strict) ────────────────────────────────
location /auth/ {
# zone=login is 5r/m per IP; burst=3 allows 3 extra requests,
# nodelay rejects (rather than delays) anything beyond that.
# Excess requests get 429 via limit_req_status in the http block.
limit_req zone=login burst=3 nodelay;
proxy_pass http://auth_backend;
}
# ── Authenticated API users ────────────────────────────────
location /api/v1/ {
# Use x-user-id header set by upstream auth middleware
# NOTE(review): if the header is absent the key is empty and the
# request is not rate limited at all - verify the header is enforced.
limit_req zone=api_auth burst=20 nodelay;
proxy_pass http://api_backend;
}
# ── Public / anonymous endpoints ──────────────────────────
location /api/public/ {
# 10r/m per IP plus a burst of 5; with nodelay the 7th rapid
# request from one IP is rejected immediately.
limit_req zone=api_anon burst=5 nodelay;
proxy_pass http://api_backend;
}
# ── Custom 429 JSON response ──────────────────────────────
# error_page routes rejected requests to the internal location below,
# which emits a JSON body instead of the default nginx error page.
error_page 429 /429.json;
location = /429.json {
internal;
default_type application/json;
return 429 '{"error":"rate_limit_exceeded","message":"Too many requests. Please slow down."}';
}
}
Whitelist internal IPs
# BUG FIX: the geo module treats values as literal strings - variables such
# as $binary_remote_addr are NOT expanded there. Use geo to classify the
# client, then map to build the actual limit key (the pattern recommended
# in the official NGINX rate-limiting guide).
geo $internal_client {
default 0;
10.0.0.0/8 1;
172.16.0.0/12 1;
192.168.0.0/16 1;
}
map $internal_client $rate_limit_key {
1 ""; # empty key = no rate limiting for private IPs
0 $binary_remote_addr;
}
# This REPLACES the earlier api_anon zone definition - declaring the same
# zone name twice causes a startup error ("zone is already bound").
limit_req_zone $rate_limit_key zone=api_anon:10m rate=10r/m;
Testing
# Send 20 rapid requests; expect 200s for the first 1+burst, then 429s
i=0
while [ "$i" -lt 20 ]; do
curl -s -o /dev/null -w "%{http_code}\n" https://api.example.com/api/public/ping
i=$((i + 1))
done