1. Introduction to Thin Provisioned Volumes
Detailed documentation on the Red Hat website:
https://access.redhat.com/documentation/zh-cn/red_hat_enterprise_linux/8/html/configuring_and_managing_logical_volumes/creating-and-managing-thin-provisioned-volumes_configuring-and-managing-logical-volumes
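In short, thin provisioning lets a logical volume advertise a size far larger than the space actually reserved for it: blocks are drawn from a shared thin pool only when they are first written. The command pattern, demonstrated in detail in section 2.2 below, is roughly (placeholders, not the exact test commands):

## carve a thin pool out of a volume group
lvcreate -L <pool_size> -T <vg>/<pool>
## create a thin volume whose virtual size may exceed the pool
lvcreate -V <virtual_size> -T <vg>/<pool> -n <lv>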
2. Thin Volume Environment Test
2.1. Add a Test Disk (sdb)
[root@csdb ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 35G 0 disk
├─sda1 8:1 0 300M 0 part /boot
└─sda2 8:2 0 34.7G 0 part
  ├─rhel-root 253:0 0 17.7G 0 lvm /
  ├─rhel-swap 253:1 0 2G 0 lvm [SWAP]
  └─rhel-u01 253:2 0 15G 0 lvm /u01
sdb 8:16 0 1G 0 disk
sr0 11:0 1 4.2G 0 rom
2.2. Create and Mount a Thin Volume
## Create the PV
[root@csdb ~]# pvcreate /dev/sdb
  Physical volume "/dev/sdb" successfully created.
## Create the VG
[root@csdb ~]# vgcreate vg_cs /dev/sdb
  Volume group "vg_cs" successfully created
## Create the thin pool
[root@csdb ~]# lvcreate -L 1010M -T vg_cs/cspool
  Rounding up size to full physical extent 1012.00 MiB
  Thin pool volume with chunk size 64.00 KiB can address at most 15.81 TiB of data.
  Logical volume "cspool" created.
## Create the thin volume
[root@csdb ~]# lvcreate -V 200G -T vg_cs/cspool -n lv_cs
  WARNING: Sum of all thin volume sizes (200.00 GiB) exceeds the size of thin pool vg_cs/cspool and the size of whole volume group (1020.00 MiB).
  WARNING: You have not turned on protection against thin pools running out of space.
  WARNING: Set activation/thin_pool_autoextend_threshold below 100 to trigger automatic extension of thin pools before they get full.
  Logical volume "lv_cs" created.
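The warnings are deliberately ignored here so that the out-of-space condition can be provoked later. In a real deployment, the autoextend protection the last WARNING refers to is configured in /etc/lvm/lvm.conf; a minimal sketch with illustrative values (autoextension only helps while the VG still has unallocated space, and the lvm2-monitor service must be running):

## /etc/lvm/lvm.conf, activation section -- illustrative values
## extend a thin pool once it is 80% full, growing it by 20% each time
thin_pool_autoextend_threshold = 80
thin_pool_autoextend_percent = 20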
## Format and mount the thin volume
[root@csdb ~]# mkfs -t xfs /dev/vg_cs/lv_cs
meta-data=/dev/vg_cs/lv_cs       isize=512    agcount=16, agsize=3276800 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=52428800, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=25600, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@csdb /]# mkdir cs
[root@csdb ~]# mount /dev/vg_cs/lv_cs /cs
[root@csdb ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/rhel-root 18G 3.9G 14G 22% /
devtmpfs 894M 0 894M 0% /dev
tmpfs 910M 0 910M 0% /dev/shm
tmpfs 910M 27M 884M 3% /run
tmpfs 910M 0 910M 0% /sys/fs/cgroup
/dev/sda1 297M 161M 137M 54% /boot
/dev/mapper/rhel-u01 15G 6.0G 9.1G 40% /u01
tmpfs 182M 12K 182M 1% /run/user/42
tmpfs 182M 0 182M 0% /run/user/0
/dev/mapper/vg_cs-lv_cs 200G 33M 200G 1% /cs
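Note that df already reports the virtual 200G size. Before loading any data, the actual allocation can be checked at the LVM layer; commands only, since the exact output depends on the system:

## how much of the pool and the thin volume is really allocated
lvs -a -o +devices vg_cs
## watch the Data% column of cspool; the LSize of lv_cs is only the virtual size
lsblk /dev/sdb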
2.3. Database Test
## Create a test tablespace
SQL> create tablespace cs datafile '/cs/cs01.dbf' size 600M;

Tablespace created.

[oracle@csdb ~]$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/rhel-root 18G 3.9G 14G 22% /
devtmpfs 894M 0 894M 0% /dev
tmpfs 910M 0 910M 0% /dev/shm
tmpfs 910M 27M 884M 3% /run
tmpfs 910M 0 910M 0% /sys/fs/cgroup
/dev/sda1 297M 161M 137M 54% /boot
/dev/mapper/rhel-u01 15G 6.1G 9.0G 41% /u01
tmpfs 182M 12K 182M 1% /run/user/42
tmpfs 182M 0 182M 0% /run/user/0
/dev/mapper/vg_cs-lv_cs 200G 633M 200G 1% /cs

At this point everything looks normal: after adding a 600M datafile the used space grows accordingly, and a huge amount of free space is still reported. Continue the test by adding another datafile.

SQL> alter tablespace cs add datafile '/cs/cs02.dbf' size 600M;
alter tablespace cs add datafile '/cs/cs02.dbf' size 600M
*
ERROR at line 1:
ORA-01119: error in creating database file '/cs/cs02.dbf'
ORA-27052: unable to flush file data
Linux-x86_64 Error: 5: Input/output error
Additional information: 1

This raises errors ORA-01119 and ORA-27052.

## Look up the error descriptions
[oracle@csdb ~]$ oerr ORA 27052
27052, 00000, "unable to flush file data"
// *Cause: fsync system call returned error, additional information indicates which function encountered the error
// *Action: check errno

[oracle@csdb ~]$ oerr ORA 01119
01119, 00000, "error in creating database file '%s'"
// *Cause: Usually due to not having enough space on the device.
// *Action:

The cause is obvious: the device ran out of space. Yet at this moment df -h still shows only 1% usage, as if the volume were barely touched. This is the thin-provisioning catch: in the test above the physical disk is only 1GB, that 1GB was given to the thin pool, and a 200GB thin volume was then allocated from the pool, far larger than the disk, the VG, and the pool itself. As a result the numbers reported by df -h are misleading.
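When the pool actually fills up, the device-mapper thin target also complains in the kernel log; a quick way to confirm from the OS side (the exact message text varies by kernel version):

## look for dm-thin messages about the pool running out of data space
dmesg | grep -i thin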
Without this god's-eye view, someone who does not know the volume is thin-provisioned and only looks at df -h could easily be left scratching their head. Use lsblk or lvs -a -o +devices to check the real disk size and the actual LVM allocation instead.
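For this test layout, the checks and the obvious way out look roughly like this (a sketch; /dev/sdc is a hypothetical new disk, not part of the test above):

## real physical topology: sdb is only 1G even though /cs claims 200G
lsblk /dev/sdb
## pool vs. thin volume: watch cspool's Data% rather than lv_cs's LSize
lvs -a -o +devices vg_cs
## if the pool really is full, the only cure is more physical space:
## add a disk to the VG and grow the pool (device name is hypothetical)
vgextend vg_cs /dev/sdc
lvextend -L +10G vg_cs/cspool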