lvm scenario

overview

create LVs

If you want to create three Logical Volumes (LVs) of 1TB, 1TB, and 5TB on a dedicated Volume Group (VG) per disk, here is how to do it step by step.

Assume:

Disks: /dev/nvme0n1, /dev/nvme1n1, /dev/nvme2n1

Volume Groups: vg_nvme0, vg_nvme1, vg_nvme2

Logical Volumes (LVs): lv1, lv2, lv3 for each VG

LV Sizes: 1TB, 1TB, and 5TB
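
Before touching the disks, a quick sanity check helps confirm these are the intended devices and that they carry no existing data (wipefs -n is a dry run that only reports signatures, it changes nothing):

# List the target disks:
lsblk -d -o NAME,SIZE,MODEL /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1
# Report (without erasing) any existing filesystem or partition-table signatures:
sudo wipefs -n /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1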

1. Create Physical Volumes (PVs)

# Initialize each disk as a PV:
sudo pvcreate /dev/nvme0n1
sudo pvcreate /dev/nvme1n1
sudo pvcreate /dev/nvme2n1

2. Create Volume Groups (VGs)

# Create a VG for each disk:
sudo vgcreate vg_nvme0 /dev/nvme0n1
sudo vgcreate vg_nvme1 /dev/nvme1n1
sudo vgcreate vg_nvme2 /dev/nvme2n1

3. Create Logical Volumes (LVs)

# Allocate 1TB, 1TB, and 5TB LVs from each VG.

# For vg_nvme0:
sudo lvcreate -L 1T -n lv1 vg_nvme0
sudo lvcreate -L 1T -n lv2 vg_nvme0
sudo lvcreate -L 5T -n lv3 vg_nvme0

# For vg_nvme1:
sudo lvcreate -L 1T -n lv1 vg_nvme1
sudo lvcreate -L 1T -n lv2 vg_nvme1
sudo lvcreate -L 5T -n lv3 vg_nvme1

# For vg_nvme2:
sudo lvcreate -L 1T -n lv1 vg_nvme2
sudo lvcreate -L 1T -n lv2 vg_nvme2
sudo lvcreate -L 5T -n lv3 vg_nvme2

# Alternatively, to allocate all remaining free space in a VG:
sudo lvcreate -l 100%FREE -n lv vg_xxx

4. Verify the Setup

# Check the LVs and their sizes:
sudo lvs

You should see something like:

LV   VG       Attr       LSize
lv1  vg_nvme0 -wi-a----- 1.00t
lv2  vg_nvme0 -wi-a----- 1.00t
lv3  vg_nvme0 -wi-a----- 5.00t
lv1  vg_nvme1 -wi-a----- 1.00t
lv2  vg_nvme1 -wi-a----- 1.00t
lv3  vg_nvme1 -wi-a----- 5.00t
lv1  vg_nvme2 -wi-a----- 1.00t
lv2  vg_nvme2 -wi-a----- 1.00t
lv3  vg_nvme2 -wi-a----- 5.00t

5. Format the LVs

# Use xfs or another filesystem to format the LVs:
sudo mkfs.xfs /dev/vg_nvme0/lv1
sudo mkfs.xfs /dev/vg_nvme0/lv2
sudo mkfs.xfs /dev/vg_nvme0/lv3

sudo mkfs.xfs /dev/vg_nvme1/lv1
sudo mkfs.xfs /dev/vg_nvme1/lv2
sudo mkfs.xfs /dev/vg_nvme1/lv3

sudo mkfs.xfs /dev/vg_nvme2/lv1
sudo mkfs.xfs /dev/vg_nvme2/lv2
sudo mkfs.xfs /dev/vg_nvme2/lv3

6. Mount the LVs

# Create directories for each LV and mount them:
sudo mkdir -p /mnt/nvme0/lv1 /mnt/nvme0/lv2 /mnt/nvme0/lv3
sudo mkdir -p /mnt/nvme1/lv1 /mnt/nvme1/lv2 /mnt/nvme1/lv3
sudo mkdir -p /mnt/nvme2/lv1 /mnt/nvme2/lv2 /mnt/nvme2/lv3

sudo mount /dev/vg_nvme0/lv1 /mnt/nvme0/lv1
sudo mount /dev/vg_nvme0/lv2 /mnt/nvme0/lv2
sudo mount /dev/vg_nvme0/lv3 /mnt/nvme0/lv3

sudo mount /dev/vg_nvme1/lv1 /mnt/nvme1/lv1
sudo mount /dev/vg_nvme1/lv2 /mnt/nvme1/lv2
sudo mount /dev/vg_nvme1/lv3 /mnt/nvme1/lv3

sudo mount /dev/vg_nvme2/lv1 /mnt/nvme2/lv1
sudo mount /dev/vg_nvme2/lv2 /mnt/nvme2/lv2
sudo mount /dev/vg_nvme2/lv3 /mnt/nvme2/lv3

7. Persist the Mounts

# Add the mounts to /etc/fstab for automatic remounting on boot:
echo '/dev/vg_nvme0/lv1 /mnt/nvme0/lv1 xfs defaults 0 0' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme0/lv2 /mnt/nvme0/lv2 xfs defaults 0 0' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme0/lv3 /mnt/nvme0/lv3 xfs defaults 0 0' | sudo tee -a /etc/fstab

echo '/dev/vg_nvme1/lv1 /mnt/nvme1/lv1 xfs defaults 0 0' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme1/lv2 /mnt/nvme1/lv2 xfs defaults 0 0' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme1/lv3 /mnt/nvme1/lv3 xfs defaults 0 0' | sudo tee -a /etc/fstab

echo '/dev/vg_nvme2/lv1 /mnt/nvme2/lv1 xfs defaults 0 0' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme2/lv2 /mnt/nvme2/lv2 xfs defaults 0 0' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme2/lv3 /mnt/nvme2/lv3 xfs defaults 0 0' | sudo tee -a /etc/fstab
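
After appending these entries, it is worth validating /etc/fstab before the next reboot (findmnt --verify needs a reasonably recent util-linux):

# Check /etc/fstab for syntax and consistency errors:
sudo findmnt --verify
# Mount everything listed in fstab that is not yet mounted:
sudo mount -a
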
Final Directory Layout

Disk          VG Name   LV Name  Size  Mount Point
/dev/nvme0n1  vg_nvme0  lv1      1T    /mnt/nvme0/lv1
                        lv2      1T    /mnt/nvme0/lv2
                        lv3      5T    /mnt/nvme0/lv3
/dev/nvme1n1  vg_nvme1  lv1      1T    /mnt/nvme1/lv1
                        lv2      1T    /mnt/nvme1/lv2
                        lv3      5T    /mnt/nvme1/lv3
/dev/nvme2n1  vg_nvme2  lv1      1T    /mnt/nvme2/lv1
                        lv2      1T    /mnt/nvme2/lv2
                        lv3      5T    /mnt/nvme2/lv3

defaults 0 x

The difference is in the last number (0 vs 2) in the fstab entry, which represents the filesystem check (fsck) pass number. Here’s what these numbers mean:

Last field (6th field) in fstab:

  • 0 = No filesystem check will be done at boot time
  • 1 = Filesystem will be checked first (typically used for root filesystem /)
  • 2 = Filesystem will be checked after pass 1 filesystems (typically used for other filesystems)

So:

  • defaults 0 0 means the filesystem will never be automatically checked during boot
  • defaults 0 2 means the filesystem will be checked during boot, but after the root filesystem

Best practices:

  • Use 0 1 for the root filesystem (/)
  • Use 0 2 for other important filesystems that should be checked
  • Use 0 0 for pseudo-filesystems (like proc, sysfs) or filesystems that don’t need checking (like swap)
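
For reference, a minimal /etc/fstab sketch covering all three pass values (the device names here are illustrative, not part of the setup above):

# 6th field = fsck pass number
/dev/mapper/vg_root-root  /          ext4  defaults  0 1   # root fs: checked first
/dev/vg_data/lv1          /mnt/data  ext4  defaults  0 2   # data fs: checked after root
proc                      /proc      proc  defaults  0 0   # pseudo-fs: never checked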

wipe disk and create lvm

Assume the device is /dev/nvme2n1.

To erase all partitions on the device /dev/nvme2n1 and create multiple logical volumes (LVs) using the LVM framework, follow these steps:

1. Verify Device and Backup Data

# Ensure you are working on the correct device. Erasing partitions will delete all data on the device.
sudo lsblk -o NAME,SIZE,TYPE,MOUNTPOINT /dev/nvme2n1

2. Erase Existing Partitions

# Wipe the partition table completely:
sudo wipefs -a /dev/nvme2n1

# Verify the disk is clean (no partitions remain):
sudo lsblk /dev/nvme2n1

If you want to use the device directly instead of creating LVs, you can format the raw device with:

sudo mkfs -t ext4 /dev/nvme2n1

or

sudo mkfs -t xfs /dev/nvme2n1

3. Create Physical Volume (PV)

# Convert the entire disk into an LVM physical volume:
sudo pvcreate /dev/nvme2n1

# Verify the PV
sudo pvdisplay

4. Create Volume Group (VG)

# Create a volume group that spans the entire disk:
sudo vgcreate vg_nvme2n1 /dev/nvme2n1

# Verify the VG:
sudo vgdisplay

5. Create Logical Volumes (LVs)

Example: Create Three LVs

LV1: 1TB

LV2: 1TB

LV3: Remaining space

sudo lvcreate -L 1T -n lv1 vg_nvme2n1
sudo lvcreate -L 1T -n lv2 vg_nvme2n1
sudo lvcreate -l 100%FREE -n lv3 vg_nvme2n1 --wipesignatures y
# Note: if lvcreate keeps prompting with "warning wipe offset xxx", re-run the command with the -y flag: sudo lvcreate xxx -y

# Verify the LVs:
sudo lvdisplay

6. Format Logical Volumes

# Format each logical volume with your desired file system (e.g., XFS):
sudo mkfs.xfs /dev/vg_nvme2n1/lv1
sudo mkfs.xfs /dev/vg_nvme2n1/lv2
sudo mkfs.xfs /dev/vg_nvme2n1/lv3

7. Mount Logical Volumes

# Create mount points and mount the LVs:
sudo mkdir -p /mnt/nvme2n1/lv1 /mnt/nvme2n1/lv2 /mnt/nvme2n1/lv3

sudo mount /dev/vg_nvme2n1/lv1 /mnt/nvme2n1/lv1
sudo mount /dev/vg_nvme2n1/lv2 /mnt/nvme2n1/lv2
sudo mount /dev/vg_nvme2n1/lv3 /mnt/nvme2n1/lv3
# Verify the mounts:
df -h

8. Make the Mounts Persistent

# Add entries to /etc/fstab to ensure the LVs are mounted on reboot:
echo '/dev/vg_nvme2n1/lv1 /mnt/nvme2n1/lv1 xfs defaults 0 2' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme2n1/lv2 /mnt/nvme2n1/lv2 xfs defaults 0 2' | sudo tee -a /etc/fstab
echo '/dev/vg_nvme2n1/lv3 /mnt/nvme2n1/lv3 xfs defaults 0 2' | sudo tee -a /etc/fstab

wipe an existing lv

1. Check What’s Using the LV

First, identify what is still using the LV:

sudo lsof | grep /dev/mapper/<lv-name>

Also, check active processes using the device:

sudo fuser -m /dev/mapper/<lv-name>

If any process is using the LV, stop it:

sudo kill -9 <PID>

2. Unmount If Mounted

Check if the LV is mounted:

mount | grep /dev/mapper/<lv-name>

If it is mounted, unmount it:

sudo umount -l /dev/mapper/<lv-name>

Use -l (lazy unmount) to force unmount if needed.

3. Disable the LV

Before removing the LV, deactivate it:

sudo lvchange -an /dev/<vg-name>/<lv-name>

Now try to remove it:

sudo dmsetup remove /dev/mapper/<lv-name>

4. Forcefully Remove LV, VG, and PV

If the above steps don’t work, forcefully remove everything:

sudo lvremove -f /dev/<vg-name>/<lv-name>
sudo vgremove -f <vg-name>
sudo pvremove -f /dev/<device-name>

Check again with:

lsblk
sudo vgs
sudo lvs

5. If Everything Fails

If none of the above works, wipe all remaining signatures from the device and reboot:

sudo wipefs --all --force /dev/<device-name>
sudo reboot

Extend lv size

# step1
lvextend -l +100%FREE /path/to/lv # e.g. /dev/ubuntu-vg/ubuntu-lv

# step2
# for ext4
resize2fs /path/to/lv # e.g. /dev/ubuntu-vg/ubuntu-lv
# for xfs
xfs_growfs /path/to/extend-path # e.g. /mnt/data
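
Note: lvextend can also do both steps at once with -r (--resizefs), which invokes fsadm to grow the filesystem after extending the LV (works for ext4 and XFS):

# one-step alternative: extend the LV and resize its filesystem together
lvextend -r -l +100%FREE /path/to/lv # e.g. /dev/ubuntu-vg/ubuntu-lv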

troubleshooting

fix lv issue

e.g. the mount /dev/vg_disk3/lv -> /mnt/disk3 is reporting I/O errors

# Unmount the filesystem (if still mounted):
umount -f /mnt/disk3 # Force unmount if stuck

# optional: If the filesystem remains busy, identify processes using lsof or fuser:
fuser -vm /dev/vg_disk3/lv # Alternative method

# Check the device’s I/O status:
dmsetup info vg_disk3-lv
# Attempt to Clear Stuck I/O
dmsetup suspend vg_disk3-lv
dmsetup resume vg_disk3-lv
dmsetup remove --force vg_disk3-lv

# Check for LVM Snapshots or Mirrors
lvs | grep vg_disk3
lvremove /dev/vg_disk3/<snapshot_name>

# Deactivate the Volume Group
vgchange -an vg_disk3 # Deactivate all volumes in the group

# optional: Locate the LVM Metadata Backup
ls -lt /etc/lvm/archive/ | grep vg_disk3
# optional: Restore the Volume Group Configuration
vgcfgrestore -f /etc/lvm/archive/vg_disk3_NNNNNN.vg vg_disk3
# optional: Verify the Logical Volume is Restored
lvdisplay /dev/vg_disk3/lv

# Repair the XFS Filesystem
vgchange -ay vg_disk3 # Activate the volume group
# optional: create lv
sudo lvcreate -l 100%FREE -n lv vg_disk3
# repair the LV (this can take a long time)
xfs_repair /dev/vg_disk3/lv # If I/O errors persist, try: xfs_repair -L /dev/vg_disk3/lv

# remount
mount /dev/vg_disk3/lv /mnt/disk3
# check
ls /mnt/disk3 # Check for I/O errors
xfs_repair -n /dev/vg_disk3/lv # Validate filesystem health (read-only check; xfs_check is deprecated in modern xfsprogs)

Logical volume <vg-name>/<lv-name> contains a filesystem in use

  1. Check which processes are using the LV

    # mount point
    sudo lsof /mnt/disk0
    # example output:
    COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
    bash 1067388 dingofs cwd DIR 259,4 64 2147483776 /mnt/disk0/install/dingo

    # lv
    sudo fuser -m /dev/mapper/<lv-name>
    # example output (3733654 is the process id):
    /dev/dm-3: 3733654m

    # kill the referencing PID
    kill -9 3733654
  2. optional: Search for processes using the device by major/minor numbers

dmsetup info -c vg_disk3-lv
# or
dmsetup info vg_disk3-lv

Example console output:

# dmsetup info -c vg_disk3-lv prints:
Name Maj Min Stat Open Targ Event UUID
vg_disk3-lv 253 4 L--w 1 1 0 LVM-jKQjctpauajJM9B6bT1OaygGfZYROrC2FNNC1tRiXxEG14UBO2GBN4umaKvne0FI

# dmsetup info vg_disk3-lv prints:
Name: vg_disk3-lv
State: ACTIVE (DEFERRED REMOVE)
Read Ahead: 256
Tables present: LIVE
Open count: 1
Event number: 0
Major, minor: 253, 4
Number of targets: 1
UUID: LVM-jKQjctpauajJM9B6bT1OaygGfZYROrC2FNNC1tRiXxEG14UBO2GBN4umaKvne0FI
  3. Search for processes using those numbers (replace 253,X with the actual major,minor numbers):

    lsof -n 2>&1 | grep '253,X'

    # e.g. following the example above
    lsof -n 2>&1 | grep "253,4"