[Notes] Linux LVM Usage and Migration

LVM Usage

LVM is a disk-management technology: online expansion and shrinking of volumes, data migration, and striped volumes (for extra performance).
PV (physical volume): a raw disk or a partition is turned into a PV; the mapping is 1:1.
VG (volume group): one or more PVs are added to a VG, which can be thought of as a storage pool.
LV (logical volume): space of a given size is carved out of the VG, much like carving out a LUN; the LV is what is finally presented to the host as a disk.
Before it can be used, the LV must be formatted with a filesystem.
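
Condensed into a minimal command sketch, the whole flow looks like this (the device and volume names simply mirror the transcript that follows):

# 1. Turn a raw disk (or a partition) into a PV
pvcreate /dev/sda
# 2. Pool one or more PVs into a VG
vgcreate vg0 /dev/sda
# 3. Carve an LV out of the VG, like carving out a LUN
lvcreate -L 15G -n lv01 vg0
# 4. Format the LV and mount it like any other disk
mkfs.xfs /dev/vg0/lv01
mkdir /lv01 && mount /dev/vg0/lv01 /lv01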

[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
sdb 8:16 0 20G 0 disk
sr0 11:0 1 1024M 0 rom
nvme0n1 259:0 0 40G 0 disk
├─nvme0n1p1 259:1 0 1G 0 part /boot
└─nvme0n1p2 259:2 0 20G 0 part
└─rl-root 253:0 0 20G 0 lvm /
[root@localhost ~]# pvcreate /dev/sda
Physical volume "/dev/sda" successfully created.
[root@localhost ~]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.
[root@localhost ~]# vgcreate vg0 /dev/sda
Volume group "vg0" successfully created
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg0 lvm2 a-- <20.00g <20.00g
/dev/sdb lvm2 --- 20.00g 20.00g
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
rl 1 1 0 wz--n- 20.00g 0
vg0 1 0 0 wz--n- <20.00g <20.00g
[root@localhost ~]# pvdisplay
--- Physical volume ---
PV Name /dev/nvme0n1p2
VG Name rl
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes (but full)
PE Size 4.00 MiB
Total PE 5120
Free PE 0
Allocated PE 5120
PV UUID ZrllZr-dzmY-K5k8-bKrL-8V3D-jcNp-3o0Ir6

--- Physical volume ---
PV Name /dev/sda
VG Name vg0
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes
PE Size 4.00 MiB
Total PE 5119
Free PE 5119
Allocated PE 0
PV UUID DEOfNN-1Rqt-7KU7-4gXu-nuwr-Sja6-93lnCW

"/dev/sdb" is a new physical volume of "20.00 GiB"
--- NEW Physical volume ---
PV Name /dev/sdb
VG Name
PV Size 20.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID lZB416-hIyr-eKiQ-4eLW-2iue-bb3N-ulXIjp

[root@localhost ~]# vgdisplay
--- Volume group ---
VG Name rl
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 2
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 1
Open LV 1
Max PV 0
Cur PV 1
Act PV 1
VG Size 20.00 GiB
PE Size 4.00 MiB
Total PE 5120
Alloc PE / Size 5120 / 20.00 GiB
Free PE / Size 0 / 0
VG UUID tsZJRs-7z9T-s9jI-ODbm-VfcM-ltyQ-YG4F8i

--- Volume group ---
VG Name vg0
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <20.00 GiB
PE Size 4.00 MiB
Total PE 5119
Alloc PE / Size 0 / 0
Free PE / Size 5119 / <20.00 GiB
VG UUID B5bdof-PwK5-VZeB-vsSs-H3y5-AZhb-qob20r

[root@localhost ~]# lvcreate -L 15G -n lv01 vg0
Logical volume "lv01" created.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-a----- 15.00g
[root@localhost ~]# lvdisplay
--- Logical volume ---
LV Path /dev/rl/root
LV Name root
VG Name rl
LV UUID cchRZb-w3U5-s6Kh-mfLR-kr5a-TEvn-mGEe89
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-04-28 13:02:41 +0800
LV Status available
# open 1
LV Size 20.00 GiB
Current LE 5120
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:0

--- Logical volume ---
LV Path /dev/vg0/lv01
LV Name lv01
VG Name vg0
LV UUID 1YSxcC-9Tdr-LfeD-KZfA-brbx-1UWY-kbwXBH
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 20:07:47 +0800
LV Status available
# open 0
LV Size 15.00 GiB
Current LE 3840
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:1

[root@localhost ~]# mkfs.xfs /dev/vg0/lv01
meta-data=/dev/vg0/lv01 isize=512 agcount=4, agsize=983040 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=3932160, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mkdir /lv01
[root@localhost ~]# mount /dev/vg0/lv01 /lv01/
[root@localhost ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs tmpfs 3.8G 9.0M 3.8G 1% /run
tmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/mapper/rl-root xfs 20G 2.5G 18G 13% /
/dev/nvme0n1p1 xfs 1014M 232M 783M 23% /boot
tmpfs tmpfs 766M 0 766M 0% /run/user/0
/dev/mapper/vg0-lv01 xfs 15G 140M 15G 1% /lv01
[root@localhost ~]# cp -r /etc/ /lv01/
[root@localhost ~]# cd /lv01/
[root@localhost lv01]# ls
etc
[root@localhost lv01]# ls etc/
adjtime ethertypes libaudit.conf pam.d skel
aliases exports libblockdev passwd smartmontools

Notes:

  1. In pvdisplay, the PE Size is 4 MiB and Total PE is the number of PEs. After /dev/sda was made into a PV and added to the VG, its PE count is 5119: 4 MiB × 5119 = 20,476 MiB, i.e. the 20 GiB disk minus the 4 MiB reserved for LVM metadata.
  2. /dev/sdb has not been added to any volume group yet, so no PEs have been carved out of it, which is why pvdisplay shows its PE values as 0.
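
A quick shell sanity check of that PE arithmetic (nothing new, just re-reading the pvdisplay fields quoted above):

pvdisplay /dev/sda | grep -E 'PE Size|Total PE'
echo $(( 4 * 5119 ))    # 20476 MiB = 20 GiB minus the 4 MiB of LVM metadata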

LVM Expansion

[root@localhost lv01]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg0 lvm2 a-- <20.00g <5.00g
/dev/sdb lvm2 --- 20.00g 20.00g
[root@localhost lv01]# vgs
VG #PV #LV #SN Attr VSize VFree
rl 1 1 0 wz--n- 20.00g 0
vg0 1 1 0 wz--n- <20.00g <5.00g
[root@localhost lv01]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-ao---- 15.00g
[root@localhost lv01]# vgextend vg0 /dev/sdb
Volume group "vg0" successfully extended
[root@localhost lv01]# vgs
VG #PV #LV #SN Attr VSize VFree
rl 1 1 0 wz--n- 20.00g 0
vg0 2 1 0 wz--n- 39.99g 24.99g
[root@localhost lv01]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg0 lvm2 a-- <20.00g <5.00g
/dev/sdb vg0 lvm2 a-- <20.00g <20.00g
[root@localhost lv01]# lvextend -L 25G /dev/vg0/lv01
Size of logical volume vg0/lv01 changed from 15.00 GiB (3840 extents) to 25.00 GiB (6400 extents).
Logical volume vg0/lv01 successfully resized.
[root@localhost lv01]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-ao---- 25.00g
[root@localhost lv01]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs tmpfs 3.8G 9.0M 3.8G 1% /run
tmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/mapper/rl-root xfs 20G 2.5G 18G 13% /
/dev/nvme0n1p1 xfs 1014M 232M 783M 23% /boot
tmpfs tmpfs 766M 0 766M 0% /run/user/0
/dev/mapper/vg0-lv01 xfs 15G 167M 15G 2% /lv01
[root@localhost lv01]# xfs_growfs /dev/vg0/lv01
meta-data=/dev/mapper/vg0-lv01 isize=512 agcount=4, agsize=983040 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=3932160, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 3932160 to 6553600
[root@localhost lv01]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs tmpfs 3.8G 0 3.8G 0% /dev/shm
tmpfs tmpfs 3.8G 9.0M 3.8G 1% /run
tmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
/dev/mapper/rl-root xfs 20G 2.5G 18G 13% /
/dev/nvme0n1p1 xfs 1014M 232M 783M 23% /boot
tmpfs tmpfs 766M 0 766M 0% /run/user/0
/dev/mapper/vg0-lv01 xfs 25G 238M 25G 1% /lv01
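
As a side note, the lvextend + xfs_growfs pair above can be collapsed into a single command: lvextend's -r/--resizefs option grows the filesystem right after resizing the LV. It is not used in the transcript above and is shown only as an alternative:

lvextend -r -L 25G /dev/vg0/lv01    # extend the LV and grow the XFS filesystem in one step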

LVM Migration

Background

To make data migration faster, it helps to break down where the data comes from and where it goes.
Q: Which is faster, copying a disk's data blocks or copying the files on the disk?

  • Copying data blocks: read blocks straight off the disk and write them to another disk.
  • Copying files: the OS reads files/directories through the filesystem, which in turn reads the underlying disk blocks, copies them into another filesystem, and only then writes them to disk.

The shorter the I/O path, the faster the copy; block copying also skips the filesystem work, which speeds it up further.
But it depends on the scenario: if a 1 TB disk holds only 20 GB of data, copying files is certainly faster than copying blocks; if the 1 TB disk holds 990 GB, copying blocks is certainly faster.
There are many ways to copy raw blocks. For example, a storage array presents a 1 TB LUN to host A; to migrate it to a new array, carve a 1 TB LUN on the new array, map it to host A as well, and use dd on host A to copy every block across.

[root@oracle ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 50G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 49.5G 0 part
├─vg_fengyh-lv_root (dm-0) 253:0 0 45.6G 0 lvm /
└─vg_fengyh-lv_swap (dm-1) 253:1 0 3.9G 0 lvm [SWAP]
sdb 8:16 0 40G 0 disk /u01
sr0 11:0 1 1024M 0 rom
sdc 8:32 0 2G 0 disk
└─dir01-lv_dir01 (dm-2) 253:2 0 2G 0 lvm /dir01
sdd 8:48 0 2G 0 disk
[root@oracle ~]# dd if=/dev/sdc of=/dev/sdd
4194304+0 records in
4194304+0 records out
2147483648 bytes (2.1 GB) copied, 40.835 s, 52.6 MB/s
[root@oracle ~]# blkid
/dev/mapper/vg_fengyh-lv_root: UUID="b6f017ca-d1af-4345-8765-fee44efc347e" TYPE="ext4"
/dev/sda1: UUID="4dc4dbdd-a488-4347-b07e-71fad64aa50c" TYPE="ext4"
/dev/sda2: UUID="Oo0EHf-wuQI-OHbK-j2ir-EzMS-Xc3j-dQX4Jj" TYPE="LVM2_member"
/dev/mapper/vg_fengyh-lv_swap: UUID="1e23f7f0-5bf0-49ca-862a-0099130b5706" TYPE="swap"
/dev/sdb: UUID="0a97bf23-5405-4e5f-a2ca-f82743c119af" TYPE="ext4"
/dev/sdc: UUID="cguMcl-dj4u-qXfF-mfZp-DhiJ-lK14-bqF3lU" TYPE="LVM2_member"
/dev/sdd: UUID="cguMcl-dj4u-qXfF-mfZp-DhiJ-lK14-bqF3lU" TYPE="LVM2_member"
/dev/mapper/dir01-lv_dir01: UUID="deece864-f923-482e-8375-b882a31700d2" TYPE="ext4"
# /dev/sdd was a raw disk before the copy; dd copied the on-disk signatures along with the data, so its UUID now matches /dev/sdc

A different angle is to copy directly at the storage-array level.
The dd approach is only suitable for offline migration, because the application has to be stopped.
What follows uses Linux LVM (logical volumes) to do the data migration instead.
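
When the dd route is chosen, a larger block size and a progress readout usually help; bs=4M is only an illustrative value, and status=progress requires a reasonably recent GNU dd:

dd if=/dev/sdc of=/dev/sdd bs=4M conv=fsync status=progress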

Principle

As described under “LVM Usage”, each LE maps one-to-one to a PE.

The migration can therefore be split into two steps (a condensed command sketch follows the list):

  1. Mirror the logical volume (lvconvert): create a new PV of the same size and change the LE-to-PE mapping from 1:1 to 1:2; once the mirror exists, the newly mapped PEs are synchronized from the source PEs, copying the data across.
  2. Split the mirror: remove the mapping to the original PEs, leaving the LEs mapped 1:1 to the new PEs.
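
A minimal sketch of that sequence, using the same names as the experiment below (/dev/sda is the source PV, /dev/sdb the new one):

# 1. Bring the new disk into the VG
pvcreate /dev/sdb
vgextend vg0 /dev/sdb
# 2. Mirror the LV onto the new PV and wait until Cpy%Sync reaches 100
lvconvert -m 1 /dev/vg0/lv01 /dev/sdb
lvs
# 3. Drop the mirror leg on the old PV, then retire the old disk
lvconvert -m 0 /dev/vg0/lv01 /dev/sda
vgreduce vg0 /dev/sda
pvremove /dev/sda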

Experiment

Create the source volume to be migrated on /dev/sda and put some data on it:

[root@localhost ~]# pvcreate /dev/sda
Physical volume "/dev/sda" successfully created.
[root@localhost ~]# vgcreate vg0 /dev/sda
Volume group "vg0" successfully created
[root@localhost ~]# lvcreate -L 5G -n lv01 vg0
WARNING: xfs signature detected on /dev/vg0/lv01 at offset 0. Wipe it? [y/n]: y
Wiping xfs signature on /dev/vg0/lv01.
Logical volume "lv01" created.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-a----- 5.00g
[root@localhost ~]# mkfs.xfs /dev/vg0/lv01
meta-data=/dev/vg0/lv01 isize=512 agcount=4, agsize=327680 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=1310720, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mount /dev/vg0/lv01 /lv01/
[root@localhost ~]# cp -r /etc/ /lv01/
[root@localhost ~]# cd /lv01/
[root@localhost lv01]# ls
etc

Mirror the logical volume: lvconvert -m 1 [LV] [PV to hold the mirror image]

[root@localhost lv01]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg0 lvm2 a-- <20.00g <15.00g
[root@localhost lv01]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
└─vg0-lv01 253:1 0 5G 0 lvm /lv01
sdb 8:16 0 20G 0 disk
sr0 11:0 1 1024M 0 rom
nvme0n1 259:0 0 40G 0 disk
├─nvme0n1p1 259:1 0 1G 0 part /boot
└─nvme0n1p2 259:2 0 20G 0 part
└─rl-root 253:0 0 20G 0 lvm /
[root@localhost lv01]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.
[root@localhost lv01]# vgextend vg0 /dev/sdb
Volume group "vg0" successfully extended

# -m 1 means keep one mirror copy
[root@localhost lv01]# lvconvert -m 1 /dev/vg0/lv01 /dev/sdb
Are you sure you want to convert linear LV vg0/lv01 to raid1 with 2 images enhancing resilience? [y/n]: y
Logical volume vg0/lv01 successfully converted.

# use lvs to check the copy progress
[root@localhost lv01]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 rwi-aor--- 5.00g 6.25
[root@localhost lv01]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 rwi-aor--- 5.00g 18.75
[root@localhost lv01]# lvdisplay
--- Logical volume ---
LV Path /dev/rl/root
LV Name root
VG Name rl
LV UUID cchRZb-w3U5-s6Kh-mfLR-kr5a-TEvn-mGEe89
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-04-28 13:02:41 +0800
LV Status available
# open 1
LV Size 20.00 GiB
Current LE 5120
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:0

--- Logical volume ---
LV Path /dev/vg0/lv01
LV Name lv01
VG Name vg0
LV UUID UzEOmf-ko6J-N8NM-fbe0-l6Fe-VaJW-3sXTMx
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 20:41:05 +0800
LV Status available
# open 1
LV Size 5.00 GiB
Current LE 1280
Mirrored volumes 2
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:1

[root@localhost lv01]# pvdisplay
--- Physical volume ---
PV Name /dev/nvme0n1p2
VG Name rl
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes (but full)
PE Size 4.00 MiB
Total PE 5120
Free PE 0
Allocated PE 5120
PV UUID ZrllZr-dzmY-K5k8-bKrL-8V3D-jcNp-3o0Ir6

--- Physical volume ---
PV Name /dev/sda
VG Name vg0
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes
PE Size 4.00 MiB
Total PE 5119
Free PE 3838
Allocated PE 1281
PV UUID ZzqIq7-ydPa-E1qk-c0L5-At4e-cCv5-dpDrRU

--- Physical volume ---
PV Name /dev/sdb
VG Name vg0
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes
PE Size 4.00 MiB
Total PE 5119
Free PE 3838
Allocated PE 1281
PV UUID hpoGGr-cU1b-pb1g-MQwM-kVTT-ahdc-J0OOc2

[root@localhost lv01]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 rwi-aor--- 5.00g 100.00
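
To watch the resynchronization without re-running lvs by hand, it can simply be polled; copy_percent is the same value lvs prints in the Cpy%Sync column (an aside, not part of the transcript):

watch -n 2 'lvs -a -o lv_name,copy_percent,devices vg0'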

Remove the mirror: lvconvert -m 0 [LV] [PV whose mirror image is to be removed]

[root@localhost lv01]# lvconvert -m 0 /dev/vg0/lv01 /dev/sda
Are you sure you want to convert raid1 LV vg0/lv01 to type linear losing all resilience? [y/n]: y
Logical volume vg0/lv01 successfully converted.
[root@localhost lv01]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg0 lvm2 a-- <20.00g <20.00g
/dev/sdb vg0 lvm2 a-- <20.00g <15.00g
[root@localhost lv01]# pvdisplay
--- Physical volume ---
PV Name /dev/nvme0n1p2
VG Name rl
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes (but full)
PE Size 4.00 MiB
Total PE 5120
Free PE 0
Allocated PE 5120
PV UUID ZrllZr-dzmY-K5k8-bKrL-8V3D-jcNp-3o0Ir6

--- Physical volume ---
PV Name /dev/sda
VG Name vg0
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes
PE Size 4.00 MiB
Total PE 5119
Free PE 5119
Allocated PE 0
PV UUID ZzqIq7-ydPa-E1qk-c0L5-At4e-cCv5-dpDrRU

--- Physical volume ---
PV Name /dev/sdb
VG Name vg0
PV Size 20.00 GiB / not usable 4.00 MiB
Allocatable yes
PE Size 4.00 MiB
Total PE 5119
Free PE 3839
Allocated PE 1280
PV UUID hpoGGr-cU1b-pb1g-MQwM-kVTT-ahdc-J0OOc2

[root@localhost lv01]# vgreduce vg0 /dev/sda
Removed "/dev/sda" from volume group "vg0"
[root@localhost lv01]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda lvm2 --- 20.00g 20.00g
/dev/sdb vg0 lvm2 a-- <20.00g <15.00g
[root@localhost lv01]# pvremove /dev/sda
Labels on physical volume "/dev/sda" successfully wiped.

Exporting the LV and importing it on another node

The data is copied onto another physical disk; that disk can then be pulled out and plugged into another host, which also doubles as a backup.
Syntax: lvconvert --splitmirrors Number -n|--name LVnew LV
        vgsplit VG_src VG_dst PV ...   (or: vgsplit -n|--name LV VG_src VG_dst)

[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sdb vg0 lvm2 a-- <20.00g <15.00g
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
rl 1 1 0 wz--n- 20.00g 0
vg0 1 1 0 wz--n- <20.00g <15.00g
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-a----- 5.00g
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
sdb 8:16 0 20G 0 disk
└─vg0-lv01 253:1 0 5G 0 lvm
sr0 11:0 1 1024M 0 rom
nvme0n1 259:0 0 40G 0 disk
├─nvme0n1p1 259:1 0 1G 0 part /boot
└─nvme0n1p2 259:2 0 20G 0 part
└─rl-root 253:0 0 20G 0 lvm /
[root@localhost ~]# pvcreate /dev/sda
Physical volume "/dev/sda" successfully created.
[root@localhost ~]# vgextend vg0 /dev/sda
Volume group "vg0" successfully extended
[root@localhost ~]# lvconvert -m 1 /dev/vg0/lv01 /dev/sda
Are you sure you want to convert linear LV vg0/lv01 to raid1 with 2 images enhancing resilience? [y/n]: y
Logical volume vg0/lv01 successfully converted.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 rwi-a-r--- 5.00g 18.75
[root@localhost ~]# lv
lvchange lvdisplay lvmconfig lvmdump lvmsadc lvremove lvs
lvconvert lvextend lvmdevices lvm_import_vdo lvmsar lvrename lvscan
lvcreate lvm lvmdiskscan lvmpolld lvreduce lvresize
[root@localhost ~]# man lvconvert
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 rwi-a-r--- 5.00g 100.00

[root@localhost ~]# lvconvert --splitmirrors 1 -n lv02 /dev/vg0/lv01
Are you sure you want to split raid1 LV vg0/lv01 losing all resilience? [y/n]: y
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg0 lvm2 a-- <20.00g <15.00g
/dev/sdb vg0 lvm2 a-- <20.00g <15.00g
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-a----- 5.00g
lv02 vg0 -wi-a----- 5.00g
[root@localhost ~]# lvdisplay
--- Logical volume ---
LV Path /dev/rl/root
LV Name root
VG Name rl
LV UUID cchRZb-w3U5-s6Kh-mfLR-kr5a-TEvn-mGEe89
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-04-28 13:02:41 +0800
LV Status available
# open 1
LV Size 20.00 GiB
Current LE 5120
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:0

--- Logical volume ---
LV Path /dev/vg0/lv01
LV Name lv01
VG Name vg0
LV UUID UzEOmf-ko6J-N8NM-fbe0-l6Fe-VaJW-3sXTMx
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 20:41:05 +0800
LV Status available
# open 0
LV Size 5.00 GiB
Current LE 1280
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:1

--- Logical volume ---
LV Path /dev/vg0/lv02
LV Name lv02
VG Name vg0
LV UUID SiM4cn-PMjL-gpKJ-7e8I-ayox-yGrI-xGV04s
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 21:03:55 +0800
LV Status available
# open 0
LV Size 5.00 GiB
Current LE 1280
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:5

[root@localhost ~]# vg
vgcfgbackup vgck vgdisplay vgimport vgmerge vgremove vgscan
vgcfgrestore vgconvert vgexport vgimportclone vgmknodes vgrename vgsplit
vgchange vgcreate vgextend vgimportdevices vgreduce vgs
[root@localhost ~]# man vgsplit
[root@localhost ~]# man vgsplit
[root@localhost ~]# vgsplit vg0 vg1 /dev/sda
Logical volume vg0/lv02 must be inactive.

# The message above says lv02 must be deactivated first; use the -an option of lvchange
[root@localhost ~]# lvchange -an /dev/vg0/lv02
[root@localhost ~]# lvdisplay
--- Logical volume ---
LV Path /dev/rl/root
LV Name root
VG Name rl
LV UUID cchRZb-w3U5-s6Kh-mfLR-kr5a-TEvn-mGEe89
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-04-28 13:02:41 +0800
LV Status available
# open 1
LV Size 20.00 GiB
Current LE 5120
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:0

--- Logical volume ---
LV Path /dev/vg0/lv01
LV Name lv01
VG Name vg0
LV UUID UzEOmf-ko6J-N8NM-fbe0-l6Fe-VaJW-3sXTMx
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 20:41:05 +0800
LV Status available
# open 0
LV Size 5.00 GiB
Current LE 1280
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:1

--- Logical volume ---
LV Path /dev/vg0/lv02
LV Name lv02
VG Name vg0
LV UUID SiM4cn-PMjL-gpKJ-7e8I-ayox-yGrI-xGV04s
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 21:03:55 +0800
LV Status NOT available
LV Size 5.00 GiB
Current LE 1280
Segments 1
Allocation inherit
Read ahead sectors auto

[root@localhost ~]# vgsplit vg0 vg1 /dev/sda
New volume group "vg1" successfully split from "vg0"
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
rl 1 1 0 wz--n- 20.00g 0
vg0 1 1 0 wz--n- <20.00g <15.00g
vg1 1 1 0 wz--n- <20.00g <15.00g
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root rl -wi-ao---- 20.00g
lv01 vg0 -wi-a----- 5.00g
lv02 vg1 -wi------- 5.00g
[root@localhost ~]# vgdisplay vg0
--- Volume group ---
VG Name vg0
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 16
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 1
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <20.00 GiB
PE Size 4.00 MiB
Total PE 5119
Alloc PE / Size 1280 / 5.00 GiB
Free PE / Size 3839 / <15.00 GiB
VG UUID SuhDg9-pErc-NHtz-bWeM-m91R-nWRV-3CwEp4

[root@localhost ~]# vgdisplay -v vg0
--- Volume group ---
VG Name vg0
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 16
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 1
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <20.00 GiB
PE Size 4.00 MiB
Total PE 5119
Alloc PE / Size 1280 / 5.00 GiB
Free PE / Size 3839 / <15.00 GiB
VG UUID SuhDg9-pErc-NHtz-bWeM-m91R-nWRV-3CwEp4

--- Logical volume ---
LV Path /dev/vg0/lv01
LV Name lv01
VG Name vg0
LV UUID UzEOmf-ko6J-N8NM-fbe0-l6Fe-VaJW-3sXTMx
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 20:41:05 +0800
LV Status available
# open 0
LV Size 5.00 GiB
Current LE 1280
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:1

--- Physical volumes ---
PV Name /dev/sdb
PV UUID hpoGGr-cU1b-pb1g-MQwM-kVTT-ahdc-J0OOc2
PV Status allocatable
Total PE / Free PE 5119 / 3839

[root@localhost ~]# vgdisplay -v vg1
--- Volume group ---
VG Name vg1
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 2
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 1
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <20.00 GiB
PE Size 4.00 MiB
Total PE 5119
Alloc PE / Size 1280 / 5.00 GiB
Free PE / Size 3839 / <15.00 GiB
VG UUID poPXYZ-nSia-YmbF-Eg7p-it6u-X3bs-Ey6HVe

--- Logical volume ---
LV Path /dev/vg1/lv02
LV Name lv02
VG Name vg1
LV UUID SiM4cn-PMjL-gpKJ-7e8I-ayox-yGrI-xGV04s
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2024-06-14 21:03:55 +0800
LV Status NOT available
LV Size 5.00 GiB
Current LE 1280
Segments 1
Allocation inherit
Read ahead sectors auto

--- Physical volumes ---
PV Name /dev/sda
PV UUID y9gxEh-1IXq-C0Qx-EGLC-3iLu-qurH-YAlYhP
PV Status allocatable
Total PE / Free PE 5119 / 3839

[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/nvme0n1p2 rl lvm2 a-- 20.00g 0
/dev/sda vg1 lvm2 a-- <20.00g <15.00g
/dev/sdb vg0 lvm2 a-- <20.00g <15.00g

At this point the PV has been split off into its own VG, but one question remains: how do we pull the disk out safely?

  1. If the disk is simply yanked out, the PV layer breaks: pvs will report the PV as failed/missing and the logical volume on it shows up as lost.

  2. pvremove is not an option either, since that wipes the PV label (and its metadata) entirely.
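
The usual way out (an assumed continuation, not shown in the transcript) is to deactivate and export the split-off VG first, so its metadata is flagged as exported and the disk can be pulled safely, then import it on the target host:

# On the source host: deactivate and export the VG that now holds lv02
vgchange -an vg1
vgexport vg1
# ...physically move /dev/sda to the other host...
# On the target host: scan for it, import, activate and mount
pvscan
vgimport vg1
vgchange -ay vg1
mount /dev/vg1/lv02 /mnt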