LAB:

RAID

1. Create a RAID1 array

Partition layout on /dev/sdb (output of fdisk -l /dev/sdb):

Disk /dev/sdb: 3253 MB, 3253469184 bytes
101 heads, 62 sectors/track, 1014 cylinders
Units = cylinders of 6262 * 512 = 3206144 bytes

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1               1          32      100161   82  Linux swap / Solaris
/dev/sdb2              33          64      100192   fd  Linux raid autodetect
/dev/sdb3              65          96      100192   fd  Linux raid autodetect
/dev/sdb4              97        1014     2874258    5  Extended
/dev/sdb5              97         128      100161   fd  Linux raid autodetect
/dev/sdb6             129         160      100161   fd  Linux raid autodetect
/dev/sdb7             161         192      100161   fd  Linux raid autodetect
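
For reference, partitions of type fd (Linux raid autodetect) like the ones above can be created interactively with fdisk; a rough sketch, assuming /dev/sdb is the disk being partitioned:

#fdisk /dev/sdb
   n     <- create a new partition
   t     <- change the partition type
   fd    <- Linux raid autodetect
   w     <- write the table and exit
#partprobe /dev/sdb    <- ask the kernel to re-read the partition table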

[root@server1 mnt]# mdadm -C /dev/md0 -l 1 -n 2 /dev/sdb{2,3}
mdadm: /dev/sdb2 appears to be part of a raid array:
level=raid1 devices=2 ctime=Thu Jul 23 21:12:54 2009
mdadm: /dev/sdb3 appears to be part of a raid array:
level=raid5 devices=4 ctime=Sat Jul 25 16:52:06 2009
Continue creating array? y
mdadm: array /dev/md0 started.
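
mdadm warns because /dev/sdb2 and /dev/sdb3 still carry superblocks from earlier arrays. If a clean start is preferred, the old metadata can be wiped first (a sketch; only run this on partitions whose old data is no longer needed):

#mdadm --zero-superblock /dev/sdb2
#mdadm --zero-superblock /dev/sdb3
#mdadm -C /dev/md0 -l 1 -n 2 /dev/sdb{2,3}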


[root@server1 mnt]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [raid1]
md0 : active raid1 sdb3[1] sdb2[0]
100096 blocks [2/2] [UU]

unused devices: &lt;none&gt;
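
With partitions this small the initial mirror sync finishes almost immediately; on larger devices the rebuild progress can be watched, for example:

#watch -n 1 cat /proc/mdstat
#mdadm --detail /dev/md0 | grep -i state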

2. Create an ext3 filesystem (mkfs.ext3)

[root@server1 mnt]# mkfs.ext3 /dev/md0
mke2fs 1.39 (29-May-2006)
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
25064 inodes, 100096 blocks
5004 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=67371008
13 block groups
8192 blocks per group, 8192 fragments per group
1928 inodes per group
Superblock backups stored on blocks:
8193, 24577, 40961, 57345, 73729

Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done

This filesystem will be automatically checked every 27 mounts or
180 days, whichever comes first. Use tune2fs -c or -i to override.
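
The mount point must exist before mounting; if /mnt/raid has not been created yet:

#mkdir -p /mnt/raid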

[root@server1 mnt]# mount /dev/md0 /mnt/raid
[root@server1 mnt]# mount
/dev/mapper/VolGroup00-LogVol00 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
nfsd on /proc/fs/nfsd type nfsd (rw)
/dev/md0 on /mnt/raid type ext3 (rw)
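
To mount the filesystem automatically at boot, an entry can be added to /etc/fstab (a sketch; adjust the mount point and options as needed):

/dev/md0   /mnt/raid   ext3   defaults   0 0

Unmounting /mnt/raid and running #mount -a afterwards is a quick way to confirm the entry works.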

3. Test the array

[root@server1 raid]# touch f1.raid.test
[root@server1 raid]# touch f2.raid.test
[root@server1 raid]# ll
total 14
-rw-r--r-- 1 root root 0 Jul 25 17:17 f1.raid.test
-rw-r--r-- 1 root root 0 Jul 25 17:17 f2.raid.test

# List the details of /dev/md0
[root@server1 raid]# mdadm --detail /dev/md0
/dev/md0:
Version : 00.90.03
Creation Time : Sat Jul 25 17:13:52 2009
Raid Level : raid1
Array Size : 100096 (97.77 MiB 102.50 MB)
Used Dev Size : 100096 (97.77 MiB 102.50 MB)
Raid Devices : 2
Total Devices : 2
Preferred Minor : 0
Persistence : Superblock is persistent

Update Time : Sat Jul 25 17:18:04 2009
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0

UUID : c03f1c9d:2dd83de8:4522c38b:16119a65
Events : 0.2

Number Major Minor RaidDevice State
0 8 18 0 active sync /dev/sdb2
1 8 19 1 active sync /dev/sdb3
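
A common follow-up test is to simulate a disk failure and confirm the mirror keeps serving data; a sketch of that extra exercise (not part of the original steps):

#mdadm /dev/md0 -f /dev/sdb2    <- mark one member as faulty
#cat /proc/mdstat               <- the array degrades (one U becomes _) but stays mounted
#mdadm /dev/md0 -r /dev/sdb2    <- remove the failed member
#mdadm /dev/md0 -a /dev/sdb2    <- add it back and let it resync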


# Write the array definition to /etc/mdadm.conf
[root@server1 etc]# mdadm --detail --scan > /etc/mdadm.conf
[root@server1 etc]# cat /etc/mdadm.conf
ARRAY /dev/md0 level=raid1 num-devices=2 UUID=31d4e3c4:4669a1d3:f213f00e:b35a41c2
[root@server1 etc]#
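
Optionally, a DEVICE line and a mail address for failure notifications can be kept in the same file (an assumed extension, not produced by the scan above):

DEVICE /dev/sdb*
MAILADDR root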


# Stop /dev/md0
#mdadm -S /dev/md0


# Reassemble the RAID (it is also assembled automatically at boot)
#mdadm -As

[root@server1 etc]# mdadm -As
mdadm: /dev/md0 has been started with 2 drives.
[root@server1 etc]#
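
After reassembly, mounting /dev/md0 on /mnt/raid again should show the f1.raid.test and f2.raid.test files created earlier (a quick check, assuming the same mount point):

#mount /dev/md0 /mnt/raid
#ls /mnt/raid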