MySQL作为最流行的关系型数据库,其性能优化是后端开发的核心技能。本文从建表设计到查询优化的全方位视角,为你打造MySQL高性能优化全攻略。

🏗️ 建表设计优化

1. 数据类型优化

🎯 选择合适的数据类型

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
-- ❌ 不推荐:使用大字段存储小数据
CREATE TABLE user_old (
id BIGINT PRIMARY KEY,
age VARCHAR(100), -- 浪费空间
status VARCHAR(50), -- 浪费空间
score DECIMAL(10,8) -- 精度过高
);

-- ✅ 推荐:精确匹配数据类型
CREATE TABLE user_optimized (
id BIGINT PRIMARY KEY,
age TINYINT UNSIGNED, -- 0-255足够
status ENUM('active', 'inactive', 'banned'), -- 节省空间
score DECIMAL(5,2) -- 精确到分
);

📊 存储空间对比

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
-- 数据类型存储空间优化
-- INT vs BIGINT:节省50%空间
-- VARCHAR(100) vs ENUM:节省80%空间
-- DECIMAL(10,8) vs DECIMAL(5,2):节省60%空间

-- 时间类型优化
CREATE TABLE orders (
id BIGINT PRIMARY KEY,
user_id BIGINT NOT NULL,
amount DECIMAL(10,2) NOT NULL,
status TINYINT NOT NULL, -- 1:待支付 2:已支付 3:已发货 4:已完成

-- 时间字段使用合适精度
created_at DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
updated_at DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
paid_at TIMESTAMP(3) NULL,

INDEX idx_user_status (user_id, status),
INDEX idx_created_at (created_at),
INDEX idx_updated_at (updated_at)
);

2. 表结构设计原则

🗂️ 范式设计与反范式平衡

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
-- 范式设计:用户信息表
CREATE TABLE user_info (
user_id BIGINT PRIMARY KEY,
username VARCHAR(50) NOT NULL UNIQUE,
email VARCHAR(100) NOT NULL UNIQUE,
phone VARCHAR(20),
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,

INDEX idx_username (username),
INDEX idx_email (email)
);

-- 反范式设计:订单聚合表(查询优化)
CREATE TABLE order_summary (
order_id BIGINT PRIMARY KEY,
user_id BIGINT NOT NULL,
username VARCHAR(50) NOT NULL, -- 冗余用户名
user_email VARCHAR(100) NOT NULL, -- 冗余邮箱
product_count INT NOT NULL, -- 聚合字段
total_amount DECIMAL(10,2) NOT NULL,-- 聚合字段
status TINYINT NOT NULL,
created_at DATETIME NOT NULL,

INDEX idx_user_created (user_id, created_at),
INDEX idx_status_created (status, created_at)
);

🔗 外键约束优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
-- 开发环境:使用外键保证数据完整性
CREATE TABLE order_items (
id BIGINT PRIMARY KEY,
order_id BIGINT NOT NULL,
product_id BIGINT NOT NULL,
quantity INT NOT NULL,
price DECIMAL(10,2) NOT NULL,

FOREIGN KEY (order_id) REFERENCES orders(id) ON DELETE CASCADE,
FOREIGN KEY (product_id) REFERENCES products(id) ON DELETE RESTRICT,

INDEX idx_order_product (order_id, product_id)
);

-- 生产环境:移除外键,依赖应用层保证数据一致性
CREATE TABLE order_items_prod (
id BIGINT PRIMARY KEY,
order_id BIGINT NOT NULL,
product_id BIGINT NOT NULL,
quantity INT NOT NULL,
price DECIMAL(10,2) NOT NULL,

INDEX idx_order_id (order_id),
INDEX idx_product_id (product_id),
INDEX idx_order_product (order_id, product_id)
);

3. 分区表设计

📅 时间分区

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
-- Order table partitioned by YEAR (the original header said "monthly",
-- but the partition function is yearly).
-- NOTE: MySQL requires every unique key (including the primary key) to
-- contain all columns used by the partitioning function, so the PK must
-- be the composite (id, created_at) — a bare PRIMARY KEY(id) fails with
-- error 1503.
CREATE TABLE orders_partitioned (
    id BIGINT NOT NULL,
    user_id BIGINT NOT NULL,
    amount DECIMAL(10,2) NOT NULL,
    status TINYINT NOT NULL,
    created_at DATETIME NOT NULL,
    updated_at DATETIME NOT NULL,
    PRIMARY KEY (id, created_at)
)
PARTITION BY RANGE (YEAR(created_at)) (
    PARTITION p2023 VALUES LESS THAN (2024),
    PARTITION p2024 VALUES LESS THAN (2025),
    PARTITION p2025 VALUES LESS THAN (2026),
    PARTITION p_future VALUES LESS THAN MAXVALUE
);

-- Monthly partitions (finer granularity).
-- Two fixes vs. the original:
--   1. UNIX_TIMESTAMP() is only allowed as a partitioning function on
--      TIMESTAMP columns; for a DATETIME column use TO_DAYS() instead.
--   2. The partitioning column must be part of the primary key.
CREATE TABLE orders_monthly (
    id BIGINT NOT NULL,
    user_id BIGINT NOT NULL,
    amount DECIMAL(10,2) NOT NULL,
    status TINYINT NOT NULL,
    created_at DATETIME NOT NULL,
    updated_at DATETIME NOT NULL,
    PRIMARY KEY (id, created_at)
)
PARTITION BY RANGE (TO_DAYS(created_at)) (
    PARTITION p202401 VALUES LESS THAN (TO_DAYS('2024-02-01')),
    PARTITION p202402 VALUES LESS THAN (TO_DAYS('2024-03-01')),
    PARTITION p202403 VALUES LESS THAN (TO_DAYS('2024-04-01')),
    -- ... add one partition per month ahead of time
    PARTITION p_future VALUES LESS THAN MAXVALUE
);

🏷️ 哈希分区

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
-- Hash partitioning by user id.
-- The partitioning column (user_id) must be part of the primary key,
-- otherwise MySQL rejects the table with error 1503.
CREATE TABLE user_logs (
    id BIGINT NOT NULL,
    user_id BIGINT NOT NULL,
    action VARCHAR(100) NOT NULL,
    data JSON,
    created_at DATETIME NOT NULL,
    PRIMARY KEY (id, user_id)
)
PARTITION BY HASH(user_id)
PARTITIONS 16;

-- Composite partitioning: RANGE by time + HASH subpartitions.
-- Both partitioning columns (created_at and record_id) must appear in
-- the primary key for MySQL to accept the definition.
CREATE TABLE audit_logs (
    id BIGINT NOT NULL,
    table_name VARCHAR(50) NOT NULL,
    record_id BIGINT NOT NULL,
    action VARCHAR(20) NOT NULL,
    old_data JSON,
    new_data JSON,
    user_id BIGINT,
    created_at DATETIME NOT NULL,
    PRIMARY KEY (id, created_at, record_id)
)
PARTITION BY RANGE (YEAR(created_at))
SUBPARTITION BY HASH(record_id) (
    PARTITION p2024 VALUES LESS THAN (2025) (
        SUBPARTITION p2024_h0,
        SUBPARTITION p2024_h1,
        SUBPARTITION p2024_h2,
        SUBPARTITION p2024_h3
    ),
    PARTITION p2025 VALUES LESS THAN (2026) (
        SUBPARTITION p2025_h0,
        SUBPARTITION p2025_h1,
        SUBPARTITION p2025_h2,
        SUBPARTITION p2025_h3
    )
);

🔍 索引优化策略

1. 索引类型选择

📚 B-Tree索引

1
2
3
4
5
6
7
8
9
10
11
12
13
-- Single-column index
CREATE INDEX idx_user_email ON users(email);

-- Composite indexes (leftmost-prefix rule)
CREATE INDEX idx_order_user_created ON orders(user_id, created_at);
CREATE INDEX idx_product_category_price ON products(category_id, price);

-- Covering index (contains every column the target query reads).
-- NOTE: MySQL does NOT support partial/filtered indexes — the original
-- "CREATE INDEX ... WHERE status = 'active'" form is PostgreSQL /
-- SQL Server syntax and is a syntax error in MySQL.
CREATE INDEX idx_user_covering ON users(email, username, status, last_login);

-- Functional index (MySQL 8.0.13+)
CREATE INDEX idx_email_domain ON users((SUBSTRING_INDEX(email, '@', -1)));

🔗 全文索引

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
-- 全文索引用于文本搜索
CREATE TABLE articles (
id BIGINT PRIMARY KEY,
title VARCHAR(200) NOT NULL,
content TEXT NOT NULL,
tags VARCHAR(500),
author_id BIGINT NOT NULL,
created_at DATETIME NOT NULL,

FULLTEXT idx_title_content (title, content),
FULLTEXT idx_tags (tags),

INDEX idx_author_created (author_id, created_at)
);

-- 全文搜索查询
SELECT * FROM articles
WHERE MATCH(title, content) AGAINST('MySQL 优化' IN NATURAL LANGUAGE MODE)
ORDER BY MATCH(title, content) AGAINST('MySQL 优化') DESC;

-- 布尔模式搜索
SELECT * FROM articles
WHERE MATCH(title, content) AGAINST('+MySQL -Oracle' IN BOOLEAN MODE);

🏷️ 空间索引

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
-- 空间数据表
CREATE TABLE locations (
id BIGINT PRIMARY KEY,
name VARCHAR(100) NOT NULL,
coordinates POINT NOT NULL, -- MySQL 8.0+ 原生支持
address VARCHAR(255),

SPATIAL INDEX idx_coordinates (coordinates)
);

-- 空间查询
SELECT name, ST_Distance(coordinates, ST_GeomFromText('POINT(116.4074 39.9042)')) AS distance
FROM locations
ORDER BY distance
LIMIT 10;

-- 范围查询
SELECT * FROM locations
WHERE ST_Contains(
ST_GeomFromText('POLYGON((116.3 39.8, 116.5 39.8, 116.5 40.0, 116.3 40.0, 116.3 39.8))'),
coordinates
);

2. 索引维护优化

📊 索引使用统计

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
-- Inspect index definitions and cardinality.
-- (INFORMATION_SCHEMA.STATISTICS has no PAGES or FILTER_CONDITION
-- columns; the valid per-column fields are used below.)
SELECT
    TABLE_SCHEMA,
    TABLE_NAME,
    INDEX_NAME,
    SEQ_IN_INDEX,
    COLUMN_NAME,
    CARDINALITY
FROM INFORMATION_SCHEMA.STATISTICS
WHERE TABLE_SCHEMA = 'your_database'
ORDER BY TABLE_NAME, INDEX_NAME, SEQ_IN_INDEX;

-- Index usage frequency (no date_created column exists in this
-- performance_schema table; an index_name of NULL means full scans).
SELECT
    object_schema,
    object_name,
    index_name,
    count_read,
    count_write,
    count_fetch
FROM performance_schema.table_io_waits_summary_by_index_usage
WHERE object_schema = 'your_database'
ORDER BY count_read DESC;

🔧 索引重建优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
-- 在线重建索引(MySQL 8.0+)
ALTER TABLE users DROP INDEX idx_email, ADD INDEX idx_email (email);

-- 重建所有索引
ALTER TABLE large_table ENGINE = InnoDB;

-- 索引碎片整理
OPTIMIZE TABLE users;

-- Stored procedure: rebuild every secondary index of one table.
-- BUG FIX vs. the original: the parameter must NOT be named table_name.
-- Inside the procedure body a parameter shadows same-named columns, so
-- "TABLE_NAME = table_name" compared the parameter to itself (always
-- TRUE) and rebuilt the indexes of EVERY table in the schema.
-- Also preserves UNIQUE indexes, which the original silently downgraded
-- to plain indexes.
DELIMITER //
CREATE PROCEDURE rebuild_table_indexes(IN in_table_name VARCHAR(100))
BEGIN
    DECLARE done INT DEFAULT FALSE;
    DECLARE idx_name VARCHAR(100);
    DECLARE idx_cols TEXT;
    DECLARE idx_non_unique INT;
    DECLARE cur CURSOR FOR
        SELECT INDEX_NAME,
               GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX),
               MAX(NON_UNIQUE)
        FROM INFORMATION_SCHEMA.STATISTICS
        WHERE TABLE_SCHEMA = DATABASE()
          AND TABLE_NAME = in_table_name
          AND INDEX_NAME != 'PRIMARY'
        GROUP BY INDEX_NAME;

    DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;

    OPEN cur;

    rebuild_loop: LOOP
        FETCH cur INTO idx_name, idx_cols, idx_non_unique;
        IF done THEN
            LEAVE rebuild_loop;
        END IF;

        -- Re-create UNIQUE indexes as UNIQUE, plain indexes as plain.
        SET @sql = CONCAT('ALTER TABLE ', in_table_name,
                          ' DROP INDEX ', idx_name,
                          ', ADD ', IF(idx_non_unique = 0, 'UNIQUE ', ''),
                          'INDEX ', idx_name, ' (', idx_cols, ')');
        PREPARE stmt FROM @sql;
        EXECUTE stmt;
        DEALLOCATE PREPARE stmt;
    END LOOP;

    CLOSE cur;
END //
DELIMITER ;

⚡ 查询优化技巧

1. 查询重写优化

🔄 子查询优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
-- ❌ 低效子查询
SELECT u.username, u.email
FROM users u
WHERE u.id IN (
SELECT DISTINCT user_id
FROM orders
WHERE created_at >= '2024-01-01'
);

-- ✅ 优化为JOIN
SELECT DISTINCT u.username, u.email
FROM users u
INNER JOIN orders o ON u.id = o.user_id
WHERE o.created_at >= '2024-01-01';

-- ❌ 标量子查询
SELECT o.id, o.amount,
(SELECT COUNT(*) FROM order_items WHERE order_id = o.id) as item_count
FROM orders o;

-- ✅ 优化为LEFT JOIN
SELECT o.id, o.amount, COUNT(oi.id) as item_count
FROM orders o
LEFT JOIN order_items oi ON o.id = oi.order_id
GROUP BY o.id, o.amount;

📊 分页查询优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
-- ❌ 传统分页(性能随页数下降)
SELECT * FROM orders
ORDER BY created_at DESC
LIMIT 100000, 20;

-- ✅ 延迟关联优化
SELECT o.*
FROM orders o
INNER JOIN (
SELECT id FROM orders
ORDER BY created_at DESC
LIMIT 100000, 20
) t ON o.id = t.id
ORDER BY o.created_at DESC;

-- ✅ 书签方式分页
SELECT * FROM orders
WHERE created_at < '2024-01-01 00:00:00'
ORDER BY created_at DESC
LIMIT 20;

2. 连接查询优化

🔗 连接顺序优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
-- Explicit join order (put the smaller / driving table first)
EXPLAIN SELECT u.username, COUNT(o.id) as order_count
FROM users u
STRAIGHT_JOIN orders o ON u.id = o.user_id -- forces the written join order
WHERE u.created_at >= '2024-01-01'
GROUP BY u.id, u.username;

-- Join buffer tuning.
-- BUG FIX: join_buffer_size is an ordinary system variable, NOT an
-- optimizer_switch flag — "SET optimizer_switch='join_buffer_size=...'"
-- is rejected by the server.
SET SESSION join_buffer_size = 262144; -- 256 KB

-- Same intent expressed as an optimizer hint (MySQL 8.0+)
SELECT /*+ JOIN_ORDER(u,o) */ u.username, o.amount
FROM users u
JOIN orders o ON u.id = o.user_id
WHERE u.status = 'active';

📈 连接算法选择

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
-- Force hash join (MySQL 8.0.18+)
SELECT /*+ HASH_JOIN(u,o) */ u.username, COUNT(o.id)
FROM users u
JOIN orders o ON u.id = o.user_id
GROUP BY u.username;

-- BUG FIX: MySQL has no merge join and no MERGE_JOIN hint. To steer the
-- optimizer away from hash join, disable it so execution falls back to
-- an index-based nested-loop join:
SELECT /*+ NO_HASH_JOIN(u,o) */ u.username, o.amount
FROM users u
JOIN orders o ON u.id = o.user_id
WHERE u.created_at >= '2024-01-01';

-- BUG FIX: there is no INDEX_JOIN hint; forcing a specific index is done
-- with the INDEX hint (or the older USE INDEX table syntax):
SELECT /*+ INDEX(u idx_user_status) */ u.username
FROM users u
WHERE u.status = 'active';

3. 聚合查询优化

📊 GROUP BY优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
-- Wide (covering) index tailored to the aggregate query below
CREATE INDEX idx_user_status_created_amount ON orders(user_id, status, created_at, amount);

SELECT user_id,
       COUNT(*) as order_count,
       SUM(amount) as total_amount,
       AVG(amount) as avg_amount
FROM orders
WHERE status = 2 AND created_at >= '2024-01-01'
GROUP BY user_id;

-- Loose index scan.
-- BUG FIX: there is no 'loose_scan' optimizer_switch flag. The optimizer
-- applies loose index scan to GROUP BY automatically when a suitable
-- index exists (here: a leading index on user_id); the similarly named
-- 'loosescan' flag only controls the semijoin LooseScan strategy for
-- IN-subqueries, not GROUP BY.
SELECT user_id, COUNT(*)
FROM orders
GROUP BY user_id;

🎯 窗口函数优化

1
2
3
4
5
6
7
8
9
10
11
12
13
-- 传统自连接方式(低效)
SELECT o1.id, o1.amount, o1.created_at,
SUM(o2.amount) as running_total
FROM orders o1
JOIN orders o2 ON o1.user_id = o2.user_id
AND o2.created_at <= o1.created_at
GROUP BY o1.id, o1.amount, o1.created_at;

-- 窗口函数优化(高效)
SELECT id, amount, created_at,
SUM(amount) OVER (PARTITION BY user_id ORDER BY created_at) as running_total,
ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY created_at) as order_seq
FROM orders;

🏗️ 数据库结构优化

1. 读写分离架构

🔄 主从复制配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# docker-compose.yml - MySQL主从复制
version: '3.8'
services:
mysql-master:
image: mysql:8.0
environment:
MYSQL_ROOT_PASSWORD: root123
MYSQL_DATABASE: myapp
MYSQL_USER: repl
MYSQL_PASSWORD: repl123
volumes:
- ./master/my.cnf:/etc/mysql/my.cnf
- master_data:/var/lib/mysql
ports:
- "3306:3306"

mysql-slave:
image: mysql:8.0
environment:
MYSQL_ROOT_PASSWORD: root123
MYSQL_DATABASE: myapp
MYSQL_USER: repl
MYSQL_PASSWORD: repl123
volumes:
- ./slave/my.cnf:/etc/mysql/my.cnf
- slave_data:/var/lib/mysql
ports:
- "3307:3306"
depends_on:
- mysql-master

📝 读写分离配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
// Spring Boot读写分离配置
@Configuration
public class DataSourceConfig {

@Bean
@ConfigurationProperties(prefix = "spring.datasource.master")
public DataSource masterDataSource() {
return DataSourceBuilder.create().build();
}

@Bean
@ConfigurationProperties(prefix = "spring.datasource.slave")
public DataSource slaveDataSource() {
return DataSourceBuilder.create().build();
}

@Bean
public DataSource routingDataSource(@Qualifier("masterDataSource") DataSource master,
@Qualifier("slaveDataSource") DataSource slave) {
Map<Object, Object> targetDataSources = new HashMap<>();
targetDataSources.put("master", master);
targetDataSources.put("slave", slave);

RoutingDataSource routingDataSource = new RoutingDataSource();
routingDataSource.setDefaultTargetDataSource(master);
routingDataSource.setTargetDataSources(targetDataSources);
return routingDataSource;
}
}

// 动态数据源路由
public class RoutingDataSource extends AbstractRoutingDataSource {

@Override
protected Object determineCurrentLookupKey() {
return TransactionSynchronizationManager.isActualTransactionActive() ? "master" : "slave";
}
}

2. 分库分表设计

🗂️ 垂直分库

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
-- 用户库
CREATE DATABASE user_db;
USE user_db;

CREATE TABLE users (
id BIGINT PRIMARY KEY,
username VARCHAR(50) UNIQUE NOT NULL,
email VARCHAR(100) UNIQUE NOT NULL,
password_hash VARCHAR(255) NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL
);

-- 订单库
CREATE DATABASE order_db;
USE order_db;

CREATE TABLE orders (
id BIGINT PRIMARY KEY,
user_id BIGINT NOT NULL,
amount DECIMAL(10,2) NOT NULL,
status TINYINT NOT NULL,
created_at DATETIME NOT NULL
);

-- 商品库
CREATE DATABASE product_db;
USE product_db;

CREATE TABLE products (
id BIGINT PRIMARY KEY,
name VARCHAR(200) NOT NULL,
price DECIMAL(10,2) NOT NULL,
category_id BIGINT NOT NULL,
stock INT NOT NULL
);

📊 水平分表

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
-- 用户分表策略(按用户ID取模)
DELIMITER //
CREATE PROCEDURE create_user_shards()
BEGIN
DECLARE i INT DEFAULT 0;
DECLARE shard_name VARCHAR(50);

WHILE i < 16 DO
SET shard_name = CONCAT('users_', LPAD(i, 2, '0'));
SET @sql = CONCAT('
CREATE TABLE ', shard_name, ' (
id BIGINT PRIMARY KEY,
username VARCHAR(50) UNIQUE NOT NULL,
email VARCHAR(100) UNIQUE NOT NULL,
password_hash VARCHAR(255) NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
INDEX idx_username (username),
INDEX idx_email (email)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
');
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;

SET i = i + 1;
END WHILE;
END //
DELIMITER ;

-- 分表路由函数
DELIMITER //
CREATE FUNCTION get_user_shard(user_id BIGINT) RETURNS VARCHAR(50)
DETERMINISTIC
BEGIN
DECLARE shard_num INT;
SET shard_num = user_id % 16;
RETURN CONCAT('users_', LPAD(shard_num, 2, '0'));
END //
DELIMITER ;

📈 性能监控与诊断

1. 慢查询分析

🔍 慢查询日志配置

1
2
3
4
5
6
7
8
9
10
11
-- 启用慢查询日志
SET GLOBAL slow_query_log = 'ON';
SET GLOBAL slow_query_log_file = '/var/log/mysql/mysql-slow.log';
SET GLOBAL long_query_time = 2; -- 2秒以上的查询记为慢查询

-- 记录未使用索引的查询
SET GLOBAL log_queries_not_using_indexes = 'ON';

-- 查看慢查询配置
SHOW VARIABLES LIKE '%slow%';
SHOW VARIABLES LIKE '%log%';

📊 慢查询分析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
-- Slow statement digest summary.
-- BUG FIX: performance_schema timers are in PICOSECONDS, so seconds are
-- timer/1e12 (the original divided by 1e9, overstating times 1000x).
-- Also fixed column names: the digest table exposes DIGEST_TEXT,
-- COUNT_STAR and SUM_* columns, not sql_text / exec_count / avg_rows_*.
SELECT digest_text,
       count_star as exec_count,
       avg_timer_wait/1000000000000 as avg_time_sec,
       sum_timer_wait/1000000000000 as total_time_sec,
       sum_rows_examined/count_star as avg_rows_examined,
       sum_rows_sent/count_star as avg_rows_sent
FROM performance_schema.events_statements_summary_by_digest
WHERE sum_timer_wait > 1000000000000 -- total time above 1 second (1e12 ps)
ORDER BY avg_timer_wait DESC
LIMIT 20;

-- Analyze a specific slow query's plan
EXPLAIN SELECT u.username, COUNT(o.id) as order_count
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
WHERE u.created_at >= '2024-01-01'
GROUP BY u.id, u.username;

-- Stage-level profiling. NOTE: SHOW PROFILE is deprecated in MySQL 8.0;
-- prefer performance_schema.events_stages_* for new work.
SET profiling = 1;
SELECT * FROM large_table WHERE complex_condition;
SHOW PROFILE FOR QUERY 1;

2. 性能监控指标

📈 核心性能指标

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
-- QPS/TPS监控
SELECT
variable_name,
variable_value
FROM performance_schema.global_status
WHERE variable_name IN (
'Queries',
'Com_select',
'Com_insert',
'Com_update',
'Com_delete',
'Innodb_row_lock_waits',
'Innodb_row_lock_time'
);

-- 连接池监控
SHOW PROCESSLIST;

SELECT
user,
host,
db,
command,
time,
state,
info
FROM information_schema.processlist
WHERE command != 'Sleep'
ORDER BY time DESC
LIMIT 20;

🔬 InnoDB引擎监控

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
-- InnoDB engine status dump
SHOW ENGINE INNODB STATUS\G

-- Buffer pool statistics.
-- BUG FIX: the page I/O counters in innodb_buffer_pool_stats are named
-- NUMBER_PAGES_CREATED / NUMBER_PAGES_WRITTEN / NUMBER_PAGES_READ,
-- not pages_created / pages_written / pages_read.
SELECT
    pool_id,
    pool_size,
    free_buffers,
    database_pages,
    old_database_pages,
    modified_database_pages,
    number_pages_created,
    number_pages_written,
    number_pages_read
FROM information_schema.innodb_buffer_pool_stats;

-- Row-lock monitoring.
-- BUG FIX: the original mixed column names from the removed
-- information_schema.innodb_locks table (lock_id, lock_trx_id,
-- lock_space, lock_page, lock_rec) into a query against
-- performance_schema.data_locks; these are the actual 8.0 columns.
SELECT
    engine_lock_id,
    engine_transaction_id,
    thread_id,
    object_schema,
    object_name,
    index_name,
    lock_type,
    lock_mode,
    lock_status,
    lock_data
FROM performance_schema.data_locks
WHERE lock_type != 'TABLE'
ORDER BY engine_transaction_id;

🛠️ 常见问题排查

1. 死锁问题诊断

🔒 死锁检测与分析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
-- 启用死锁检测
SET GLOBAL innodb_print_all_deadlocks = ON;

-- 查看最近死锁信息
SHOW ENGINE INNODB STATUS;

-- 分析死锁日志
SELECT
trx_id,
trx_state,
trx_started,
trx_wait_started,
trx_mysql_thread_id,
trx_query,
trx_lock_structs,
trx_lock_memory_bytes,
trx_rows_locked,
trx_rows_modified
FROM information_schema.innodb_trx
WHERE trx_state = 'LOCK WAIT';

🛡️ 死锁预防策略

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
-- 1. 统一加锁顺序
START TRANSACTION;

-- 总是先锁定用户表,再锁定订单表
SELECT * FROM users WHERE id = ? FOR UPDATE;
SELECT * FROM orders WHERE user_id = ? FOR UPDATE;

-- 业务逻辑
UPDATE users SET balance = balance - ? WHERE id = ?;
INSERT INTO orders (user_id, amount, status) VALUES (?, ?, 1);

COMMIT;

-- 2. 使用较低隔离级别
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;

-- 3. 缩短事务时间
-- 将大事务拆分为多个小事务

2. 内存问题排查

🧠 内存使用分析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
-- Inspect memory-related configuration
SHOW VARIABLES LIKE '%buffer%';
SHOW VARIABLES LIKE '%cache%';
SHOW VARIABLES LIKE '%memory%';

-- InnoDB buffer pool usage.
-- BUG FIXES: innodb_buffer_pool_stats has POOL_SIZE / FREE_BUFFERS /
-- MODIFIED_DATABASE_PAGES (not pages_total / pages_free / pages_dirty),
-- and the page size should come from @@innodb_page_size rather than a
-- hard-coded 16384 (the page size is configurable).
SELECT
    (pool_size * @@innodb_page_size)/1024/1024 as buffer_pool_size_mb,
    (modified_database_pages * @@innodb_page_size)/1024/1024 as dirty_pages_mb,
    (free_buffers * @@innodb_page_size)/1024/1024 as free_pages_mb,
    pool_size,
    modified_database_pages,
    free_buffers
FROM information_schema.innodb_buffer_pool_stats;

-- Per-connection memory usage.
-- BUG FIX: performance_schema.threads has no memory_used column; use the
-- sys schema view instead (requires memory instrumentation enabled).
-- The view is already sorted by current bytes allocated, descending.
SELECT
    thread_id,
    user,
    current_allocated
FROM sys.memory_by_thread_by_current_bytes
LIMIT 10;

🔧 内存优化配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# my.cnf - 内存优化配置
[mysqld]
# InnoDB缓冲池 (设置为系统内存的70%)
innodb_buffer_pool_size = 4G

# 查询缓存 (MySQL 8.0已移除)
# query_cache_size = 256M

# 排序缓冲区
sort_buffer_size = 2M

# 随机读取缓冲区
read_rnd_buffer_size = 2M

# 连接缓冲区
join_buffer_size = 2M

# 临时表大小
tmp_table_size = 128M
max_heap_table_size = 128M

# MyISAM键缓冲区
key_buffer_size = 256M

# 表缓存
table_open_cache = 4096
table_definition_cache = 4096

3. 高并发问题优化

🚀 连接池优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
// HikariCP高并发配置
@Configuration
public class DataSourceConfig {

@Bean
@ConfigurationProperties(prefix = "spring.datasource.hikari")
public HikariDataSource dataSource() {
HikariConfig config = new HikariConfig();

// 连接池大小 (根据CPU核心数设置)
config.setMaximumPoolSize(Runtime.getRuntime().availableProcessors() * 2 + 1);
config.setMinimumIdle(Runtime.getRuntime().availableProcessors());

// 连接超时
config.setConnectionTimeout(30000);
config.setIdleTimeout(600000);
config.setMaxLifetime(1800000);

// 连接测试
config.setConnectionTestQuery("SELECT 1");
config.setValidationTimeout(5000);

// 连接泄漏检测
config.setLeakDetectionThreshold(60000);

return new HikariDataSource(config);
}
}

⚡ 缓存策略优化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
// Multi-level cache service: local Caffeine cache -> Redis -> database.
// Fixes vs. the original listing:
//  - userRepository was used but never injected;
//  - the local cache field must be a Cache instance, not the Caffeine
//    builder (the builder has no put()/getIfPresent() methods);
//  - getFromLocalCache()/getFromRedis() were called but never defined;
//  - @Cacheable was removed: it layered Spring's cache abstraction on top
//    of the manual multi-level logic, caching the same value twice.
@Service
@Slf4j
public class CacheService {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private Cache<Object, Object> caffeineCache;

    @Autowired
    private UserRepository userRepository;

    public User getUser(Long userId) {
        // 1) in-process cache: cheapest lookup
        User user = getFromLocalCache(userId);
        if (user != null) {
            return user;
        }

        // 2) shared Redis cache; backfill the local cache on hit
        user = getFromRedis(userId);
        if (user != null) {
            caffeineCache.put(userId, user);
            return user;
        }

        // 3) database; backfill both cache levels on hit
        user = userRepository.findById(userId).orElse(null);
        if (user != null) {
            redisTemplate.opsForValue().set("user:" + userId, user, 30, TimeUnit.MINUTES);
            caffeineCache.put(userId, user);
        }

        return user;
    }

    // Local (Caffeine) lookup; null on miss.
    private User getFromLocalCache(Long userId) {
        return (User) caffeineCache.getIfPresent(userId);
    }

    // Redis lookup; null on miss.
    private User getFromRedis(Long userId) {
        return (User) redisTemplate.opsForValue().get("user:" + userId);
    }

    // Cache warm-up: refresh hot users every 30 minutes.
    @Scheduled(fixedRate = 1800000)
    public void warmUpCache() {
        log.info("开始缓存预热");

        List<Long> hotUserIds = userRepository.findHotUserIds();
        hotUserIds.forEach(userId -> {
            User user = userRepository.findById(userId).orElse(null);
            if (user != null) {
                redisTemplate.opsForValue().set("user:" + userId, user, 30, TimeUnit.MINUTES);
            }
        });

        log.info("缓存预热完成,共预热{}个用户", hotUserIds.size());
    }
}

🎯 最佳实践总结

1. 设计阶段优化

🏗️ 建表三原则

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
-- 1. 选择最小合适数据类型
CREATE TABLE optimized_table (
id BIGINT PRIMARY KEY, -- 8字节,足够大
status TINYINT NOT NULL, -- 1字节,节省空间
amount DECIMAL(10,2) NOT NULL, -- 精确到分
created_at DATETIME(3) NOT NULL, -- 微秒精度
tags JSON, -- 灵活扩展
INDEX idx_status_created (status, created_at)
);

-- 2. 预留扩展字段,避免频繁DDL
ALTER TABLE users ADD COLUMN ext_info JSON;

-- 3. Use partitioned tables deliberately.
-- BUG FIXES vs. the original:
--  - Partition boundaries must be CONSTANT expressions; CURDATE() /
--    DATE_ADD() are rejected by the server. Pre-create future partitions
--    with literal boundaries from a scheduled job instead.
--  - UNIX_TIMESTAMP() may only partition TIMESTAMP columns; use
--    TO_DAYS() for a DATETIME column.
--  - The partitioning column must be part of the primary key.
CREATE TABLE logs (
    id BIGINT NOT NULL,
    level VARCHAR(10) NOT NULL,
    message TEXT,
    created_at DATETIME NOT NULL,
    PRIMARY KEY (id, created_at)
)
PARTITION BY RANGE (TO_DAYS(created_at)) (
    PARTITION p202406 VALUES LESS THAN (TO_DAYS('2024-07-01')),
    PARTITION p202407 VALUES LESS THAN (TO_DAYS('2024-08-01')),
    PARTITION p_future VALUES LESS THAN MAXVALUE
);

2. 索引优化最佳实践

🎯 索引设计原则

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
-- 1. 最左前缀原则
CREATE INDEX idx_composite ON orders(user_id, status, created_at);

-- 2. 覆盖索引
CREATE INDEX idx_covering ON users(email, username, last_login);

-- 3. 索引下推
EXPLAIN SELECT * FROM orders
WHERE user_id = 123 AND status > 1 AND created_at > '2024-01-01';
-- MySQL 8.0+ 支持索引下推优化

-- 4. 避免冗余索引
SHOW INDEX FROM table_name; -- 检查冗余索引

-- 5. 定期维护索引
ANALYZE TABLE table_name; -- 更新索引统计信息
OPTIMIZE TABLE table_name; -- 重组表和索引

3. 查询优化清单

📋 SQL优化检查清单

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
-- 1. 使用EXPLAIN分析执行计划
EXPLAIN FORMAT=JSON SELECT * FROM large_table WHERE condition;

-- 2. 避免SELECT *
SELECT id, name, email FROM users WHERE status = 'active';

-- 3. 使用LIMIT限制结果集
SELECT * FROM orders ORDER BY created_at DESC LIMIT 100;

-- 4. 合理使用JOIN
SELECT u.name, COUNT(o.id) as order_count
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
WHERE u.created_at >= '2024-01-01'
GROUP BY u.id, u.name;

-- 5. 使用UNION ALL替代UNION
SELECT id, name FROM users WHERE status = 'active'
UNION ALL
SELECT id, name FROM users WHERE created_at >= '2024-01-01';

4. 监控告警体系

📊 性能监控指标

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
-- 创建性能监控表
CREATE TABLE performance_metrics (
id BIGINT PRIMARY KEY AUTO_INCREMENT,
metric_name VARCHAR(100) NOT NULL,
metric_value DECIMAL(10,4) NOT NULL,
collected_at DATETIME NOT NULL,
server_id VARCHAR(50),

INDEX idx_metric_time (metric_name, collected_at),
INDEX idx_server_time (server_id, collected_at)
);

-- Metric collection procedure, invoked once a minute by the event below.
-- BUG FIXES vs. the original:
--  - the QPS statement was syntactically invalid
--    ("WHERE variable_name = 'Queries' - @last_queries) / 60");
--    we store an average-QPS sample instead (Queries / Uptime) — true
--    per-minute QPS should be computed by diffing consecutive samples;
--  - the slow-ratio division could divide by zero (guarded with NULLIF);
--  - timer thresholds are picoseconds: 1e12 ps = 1 second.
DELIMITER //
CREATE PROCEDURE collect_performance_metrics()
BEGIN
    -- Average QPS since server start
    INSERT INTO performance_metrics (metric_name, metric_value, collected_at)
    SELECT 'qps_avg',
           (SELECT variable_value FROM performance_schema.global_status
             WHERE variable_name = 'Queries')
           / NULLIF((SELECT variable_value FROM performance_schema.global_status
                      WHERE variable_name = 'Uptime'), 0),
           NOW();

    -- Share (%) of currently executing statements running longer than 1 s
    INSERT INTO performance_metrics (metric_name, metric_value, collected_at)
    SELECT 'slow_query_ratio',
           (SELECT COUNT(*) FROM performance_schema.events_statements_current
             WHERE timer_wait > 1000000000000)
           / NULLIF((SELECT COUNT(*) FROM performance_schema.events_statements_current), 0)
           * 100,
           NOW();

    -- Connection usage (%) relative to max_connections
    INSERT INTO performance_metrics (metric_name, metric_value, collected_at)
    SELECT 'connection_usage',
           (SELECT COUNT(*) FROM information_schema.processlist WHERE command != 'Sleep')
           / @@max_connections * 100,
           NOW();
END //
DELIMITER ;

-- Schedule collection every minute (requires event_scheduler = ON)
CREATE EVENT collect_metrics_event
ON SCHEDULE EVERY 1 MINUTE
DO CALL collect_performance_metrics();

📚 参考资料

📖 推荐书籍

🛠️ 工具资源

📊 在线资源


🚀 MySQL高性能优化,从建表设计到查询优化的全面实战指南!

🎯 掌握这些优化技巧,让你的数据库性能提升10倍以上!