Implement database observability: MySQL performance_schema, PostgreSQL pg_stat_statements, key metrics (QPS/latency/connections/buffer hit rate), and Prometheus exporters.
mysqld_exporter — Prometheus exporter: all MySQL global status variables as metrics
postgres_exporter
Prometheus exporter: pg_stat_* views as metrics
Grafana dashboards
Pre-built at grafana.com/grafana/dashboards
SLO
Example targets: p99 query latency < 100 ms for OLTP workloads; availability 99.99%; buffer pool / cache hit rate > 99%
💡 Architect's insight: The most important metric is query latency distribution (p50/p95/p99), not averages. A 1-second average can hide 5% of queries taking 20 seconds. pg_stat_statements exposes min/mean/max/stddev per query — not a true histogram — so combine it with the slow query log (or an extension such as pg_stat_monitor) to approximate percentiles.
# Launch a disposable MySQL 8.0 container for the observability lab.
docker run -d --name mysql-lab -e MYSQL_ROOT_PASSWORD=rootpass mysql:8.0
# Poll until the server accepts connections: up to 30 attempts, 2s apart (~60s).
attempt=0
until docker exec mysql-lab mysql -uroot -prootpass -e "SELECT 1" 2>/dev/null; do
  attempt=$((attempt + 1))
  [ "$attempt" -ge 30 ] && break
  sleep 2
done
# Verify performance_schema and enable statement instrumentation.
docker exec mysql-lab mysql -uroot -prootpass -e "
-- performance_schema is enabled by default in MySQL 8.0
SELECT variable_value FROM performance_schema.global_variables
WHERE variable_name = 'performance_schema';
-- Enable statement consumers, including 'statements_digest', which
-- feeds events_statements_summary_by_digest (the top-queries report
-- below reads that table; without this consumer it stays empty).
UPDATE performance_schema.setup_consumers
SET ENABLED = 'YES'
WHERE NAME IN ('events_statements_current',
'events_statements_history',
'events_statements_history_long',
'statements_digest');
-- Instrument and time every statement class.
UPDATE performance_schema.setup_instruments
SET ENABLED = 'YES', TIMED = 'YES'
WHERE NAME LIKE 'statement/%';
SELECT 'performance_schema enabled' AS status;
"
+----------------+
| variable_value |
+----------------+
| ON |
+----------------+
status
---------------------------
performance_schema enabled
# Create the lab schema and seed it with test rows.
docker exec mysql-lab mysql -uroot -prootpass -e "
-- Create test schema
CREATE DATABASE IF NOT EXISTS shopdb;
USE shopdb;
CREATE TABLE IF NOT EXISTS products (
id INT AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(100),
price DECIMAL(10,2),
category VARCHAR(50),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
INDEX idx_category (category),
INDEX idx_price (price)
);
-- Insert 500 rows of test data.
-- WITH RECURSIVE replaces the old @rownum derived-table trick:
-- assigning user variables inside SELECT is deprecated in MySQL 8.0,
-- and the previous 5 x 5 x 4 cross join produced only 100 rows even
-- though LIMIT 500 implied 500 were intended.
INSERT INTO products (name, price, category)
WITH RECURSIVE nums (n) AS (
SELECT 1
UNION ALL
SELECT n + 1 FROM nums WHERE n < 500
)
SELECT CONCAT('Product-', n),
ROUND(RAND() * 1000, 2),
ELT(1 + FLOOR(RAND() * 5), 'Electronics','Books','Clothing','Food','Sports')
FROM nums;
"
# Run various queries to populate performance_schema
docker exec mysql-lab mysql -uroot -prootpass shopdb -e "
SELECT * FROM products WHERE category = 'Electronics' AND price > 500;
SELECT * FROM products ORDER BY price DESC LIMIT 10;
SELECT category, COUNT(*) as count, AVG(price) as avg_price FROM products GROUP BY category;
SELECT p1.* FROM products p1 JOIN products p2 ON p1.category = p2.category WHERE p1.price > 800;
SELECT COUNT(*) FROM products WHERE name LIKE '%Product-1%';
"
# Find top queries by execution count and total time
docker exec mysql-lab mysql -uroot -prootpass -e "
SELECT
SUBSTR(DIGEST_TEXT, 1, 80) AS query,
COUNT_STAR AS exec_count,
ROUND(AVG_TIMER_WAIT/1000000000, 3) AS avg_ms,
ROUND(SUM_TIMER_WAIT/1000000000, 3) AS total_ms,
ROUND(SUM_ROWS_EXAMINED/COUNT_STAR) AS avg_rows_examined,
ROUND(SUM_ROWS_SENT/COUNT_STAR) AS avg_rows_sent,
SUM_NO_INDEX_USED AS no_index_count
FROM performance_schema.events_statements_summary_by_digest
WHERE SCHEMA_NAME = 'shopdb'
ORDER BY SUM_TIMER_WAIT DESC
LIMIT 10;
" 2>/dev/null
docker exec mysql-lab mysql -uroot -prootpass -e "
-- === GLOBAL STATUS SNAPSHOT ===
SELECT variable_name, variable_value
FROM performance_schema.global_status
WHERE variable_name IN (
'Queries',
'Questions',
'Threads_connected',
'Threads_running',
'Slow_queries',
'Innodb_buffer_pool_read_requests',
'Innodb_buffer_pool_reads',
'Innodb_row_lock_waits',
'Com_select',
'Com_insert',
'Com_update',
'Com_delete'
)
ORDER BY variable_name;
-- Buffer pool hit rate (should be > 99%)
SELECT
ROUND(100 * (1 - (
(SELECT variable_value FROM performance_schema.global_status WHERE variable_name = 'Innodb_buffer_pool_reads') /
NULLIF((SELECT variable_value FROM performance_schema.global_status WHERE variable_name = 'Innodb_buffer_pool_read_requests'), 0)
)), 2) AS buffer_pool_hit_rate_pct;
-- Current active connections
SELECT user, db, command, time, state, SUBSTR(info, 1, 50) AS query
FROM information_schema.processlist
WHERE command != 'Sleep'
ORDER BY time DESC;
-- InnoDB status summary
SHOW ENGINE INNODB STATUS\G
" 2>/dev/null | head -80
# Launch PostgreSQL 15 with pg_stat_statements preloaded.
# The extension MUST be loaded at server start via
# shared_preload_libraries: without the -c flag, CREATE EXTENSION
# succeeds but querying the pg_stat_statements view errors out.
docker run -d --name pg-lab -e POSTGRES_PASSWORD=rootpass postgres:15 \
  -c shared_preload_libraries=pg_stat_statements
# Poll for readiness instead of a fixed sleep (up to ~60s).
for i in $(seq 1 30); do docker exec pg-lab pg_isready -U postgres >/dev/null 2>&1 && break || sleep 2; done
docker exec -i pg-lab psql -U postgres << 'SQL'
-- Enable pg_stat_statements
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
-- Verify
SELECT extversion FROM pg_extension WHERE extname = 'pg_stat_statements';
-- Create test schema and data
CREATE TABLE orders (
id SERIAL PRIMARY KEY,
user_id INT,
total DECIMAL(10,2),
status VARCHAR(20),
created_at TIMESTAMPTZ DEFAULT NOW()
);
INSERT INTO orders (user_id, total, status)
SELECT (random()*100)::INT, (random()*1000)::DECIMAL(10,2),
CASE (random()*3)::INT WHEN 0 THEN 'pending' WHEN 1 THEN 'shipped' ELSE 'delivered' END
FROM generate_series(1, 10000);
CREATE INDEX idx_orders_user ON orders(user_id);
CREATE INDEX idx_orders_status ON orders(status, created_at);
-- Run various queries
SELECT * FROM orders WHERE user_id = 42;
SELECT user_id, COUNT(*), SUM(total) FROM orders GROUP BY user_id ORDER BY COUNT(*) DESC;
SELECT * FROM orders WHERE status = 'pending' AND created_at > NOW() - INTERVAL '7 days';
SELECT * FROM orders WHERE total > 900 ORDER BY total DESC LIMIT 10;
-- Top queries by total execution time
SELECT
LEFT(query, 70) AS query,
calls,
ROUND(mean_exec_time::NUMERIC, 3) AS avg_ms,
ROUND(total_exec_time::NUMERIC, 2) AS total_ms,
ROUND(stddev_exec_time::NUMERIC, 3) AS stddev_ms,
ROUND(100.0 * total_exec_time / SUM(total_exec_time) OVER (), 2) AS pct_total
FROM pg_stat_statements
WHERE query NOT LIKE '%pg_stat%'
ORDER BY total_exec_time DESC
LIMIT 8;
SQL
docker exec -i pg-lab psql -U postgres << 'SQL'
-- Database statistics
SELECT
datname AS database,
numbackends AS connections,
xact_commit AS commits,
xact_rollback AS rollbacks,
blks_read,
blks_hit,
ROUND(100.0 * blks_hit / NULLIF(blks_hit + blks_read, 0), 2) AS cache_hit_rate,
tup_returned AS rows_returned,
tup_fetched AS rows_fetched,
tup_inserted,
tup_updated,
tup_deleted
FROM pg_stat_database
WHERE datname = 'postgres';
-- Table-level stats (bloat, sequential scans)
SELECT
relname AS table,
seq_scan AS full_scans,
seq_tup_read AS rows_via_seqscan,
idx_scan AS index_scans,
n_tup_ins AS inserts,
n_tup_upd AS updates,
n_tup_del AS deletes,
n_dead_tup AS dead_rows,
ROUND(100.0 * n_dead_tup / NULLIF(n_live_tup + n_dead_tup, 0), 1) AS dead_row_pct
FROM pg_stat_user_tables
ORDER BY seq_scan DESC;
-- Active connections & queries
SELECT pid, usename, application_name, client_addr,
state, ROUND(EXTRACT(EPOCH FROM NOW() - query_start)::NUMERIC, 1) AS query_seconds,
LEFT(query, 60) AS query
FROM pg_stat_activity
WHERE state != 'idle'
AND query NOT LIKE '%pg_stat_activity%';
SQL