cat /proc/mdstat # status
mdadm --detail /dev/md0 # also shows status
mdadm --create /dev/md0 -n2 -l1 /dev/sda1 /dev/sdb1 # create a new array from sda1 and sdb1
mdadm --add /dev/md0 /dev/sdc1 # add sdc1 to the array
mdadm --detail --scan # show array parameters
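A minimal end-to-end sketch putting these commands together (the device names and the Debian-style config path /etc/mdadm/mdadm.conf are assumptions, adjust for your system):
mdadm --create /dev/md0 -n2 -l1 /dev/sda1 /dev/sdb1 # create the mirror (destroys data on both partitions)
cat /proc/mdstat # watch the initial sync
mdadm --detail --scan >> /etc/mdadm/mdadm.conf # persist the array definition so it assembles at boot (assumed path)
update-initramfs -u # Debian/Ubuntu only, so the initramfs knows about the array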
When the array degrades, we can spot it like this:
root@MB:~# mdadm --detail /dev/md1 # also shows status
/dev/md1:
Version : 1.2
Creation Time : Thu May 5 16:24:29 2022
Raid Level : raid1
Array Size : 4882680832 (4656.49 GiB 4999.87 GB)
Used Dev Size : 4882680832 (4656.49 GiB 4999.87 GB)
Raid Devices : 2
Total Devices : 1
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Sun Nov 27 11:48:42 2022
State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Consistency Policy : bitmap
Name : MB.JFila.cz:1 (local to host MB.JF)
UUID : db6464ea:3b6ac1a1:3a3f26ec:20cb5e65
Events : 70585
Number Major Minor RaidDevice State
- 0 0 0 removed
1 8 17 1 active sync /dev/sdb1
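A degraded array can also be caught quickly without reading the whole --detail output; a small sketch (the grep patterns are my own, not from the original):
grep -q '\[.*_.*\]' /proc/mdstat && echo "array degraded" || echo "all arrays OK" # a missing member shows as "_" in the [UU] field
mdadm --detail /dev/md1 | grep -E 'State :|Devices :' # summary fields only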
First, find out which disk dropped out of the array:
root@MB:~# blkid | grep MB.JFil
/dev/sda1: UUID="db6464ea-3b6a-c1a1-3a3f-26ec20cb5e65" UUID_SUB="f85690c7-00b4-bdaf-9678-ba49025766ce" LABEL="MB.JF:1" TYPE="linux_raid_member" PARTUUID="8e221fd9-1b67-4b6c-8dad-6fb1f6a3e420"
/dev/sdb1: UUID="db6464ea-3b6a-c1a1-3a3f-26ec20cb5e65" UUID_SUB="3af1ed68-00dd-c355-40cf-ad836f0a3647" LABEL="MB.JF:1" TYPE="linux_raid_member" PARTUUID="d3360b04-a4f3-764c-82ec-da41c5dd5d30"
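Before re-adding it, the dropped member can be double-checked by reading its RAID superblock; its Events counter lags behind the surviving member's (a hedged confirmation step, not part of the original run):
mdadm --examine /dev/sda1 | grep -E 'Events|Array State' # dropped member: lower event count
mdadm --examine /dev/sdb1 | grep -E 'Events|Array State' # surviving member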
The second HDD is then added back into the array with the following command:
root@MB:~# mdadm /dev/md1 -a /dev/sda1
mdadm: re-added /dev/sda1
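Here the original disk was only re-added. If the failed disk has to be physically replaced, the new one first needs the same partition layout; a common approach (an assumption, not taken from this run) is to copy the partition table from the surviving disk:
sfdisk -d /dev/sdb > parttable.dump # dump the partition table of the healthy disk
sfdisk /dev/sda < parttable.dump # write it to the new, empty disk
mdadm /dev/md1 -a /dev/sda1 # then add the fresh partition to the array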
root@MB:~# mdadm --detail /dev/md1 # also shows status
/dev/md1:
Version : 1.2
Creation Time : Thu May 5 16:24:29 2022
Raid Level : raid1
Array Size : 4882680832 (4656.49 GiB 4999.87 GB)
Used Dev Size : 4882680832 (4656.49 GiB 4999.87 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Sun Nov 27 11:52:39 2022
State : clean, degraded, recovering
Active Devices : 1
Working Devices : 2
Failed Devices : 0
Spare Devices : 1
Consistency Policy : bitmap
Rebuild Status : 2% complete
Name : MB.JFila.cz:1 (local to host MB.JF)
UUID : db6464ea:3b6ac1a1:3a3f26ec:20cb5e65
Events : 70587
Number Major Minor RaidDevice State
3 8 1 0 spare rebuilding /dev/sda1
1 8 17 1 active sync /dev/sdb1
The rebuild is now in progress:
root@MB:~# cat /proc/mdstat # status
Personalities : [linear] [raid0] [raid1] [raid10] [raid6] [raid5] [raid4] [multipath]
md1 : active raid1 sda1[3] sdb1[1]
4882680832 blocks super 1.2 [2/1] [_U]
[>....................] recovery = 2.9% (144065984/4882680832) finish=453.6min speed=174080K/sec
bitmap: 27/37 pages [108KB], 65536KB chunk
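The rebuild can be watched continuously and, if needed, sped up by raising the kernel's resync throttling limits (the values below are illustrative assumptions):
watch -n 10 cat /proc/mdstat # refresh the progress every 10 seconds
echo 200000 > /proc/sys/dev/raid/speed_limit_min # KB/s per device, example value
echo 1000000 > /proc/sys/dev/raid/speed_limit_max # KB/s per device, example value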
And done, everything is back to normal:
root@MB:~# mdadm --detail /dev/md1 # also shows status
/dev/md1:
Version : 1.2
Creation Time : Thu May 5 16:24:29 2022
Raid Level : raid1
Array Size : 4882680832 (4656.49 GiB 4999.87 GB)
Used Dev Size : 4882680832 (4656.49 GiB 4999.87 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Sun Nov 27 11:56:15 2022
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : bitmap
Name : MB.JFila.cz:1 (local to host MB.JF)
UUID : db6464ea:3b6ac1a1:3a3f26ec:20cb5e65
Events : 70639
Number Major Minor RaidDevice State
3 8 1 0 active sync /dev/sda1
1 8 17 1 active sync /dev/sdb1
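A degraded array is easy to miss, so it is worth having mdadm watch the arrays and send mail on failure (the address and the manual daemon invocation are assumptions; most distributions start the monitor from their own service):
grep '\[UU\]' /proc/mdstat && echo "mirror healthy" # both members active show as [UU]
mdadm --monitor --scan --mail=root@localhost --daemonise # example address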