㈠ 如何使用Linux自帶多路徑DM
一、多路徑解釋
多路徑,顧名思義就是有多種選擇的路徑。在SAN或IPSAN環境,主機和存儲之間外加了光纖交換機,這就導致主機和存儲之間交換速度和效率增強,一條路徑肯定是不行的,也是不安全不穩定的。多路徑就是要來解決從主機到磁碟之間最快,最高效的問題。主要實現如下幾個功能
故障的切換和恢復
IO流量的負載均衡
磁碟的虛擬化
多路徑之前一直是存儲廠商負責解決,近來被拆分出來單獨賣錢了。
構架基本是這樣的:存儲,多路徑軟體,光纖交換機,主機,主機系統。
二、LINUX下的multipath
1、查看是否自帶安裝?
1
2
3
4
5
6
[root@web2 multipath]# rpm -qa|grep device
device-mapper-1.02.39-1.el5
device-mapper-1.02.39-1.el5
device-mapper-multipath-0.4.7-34.el5
device-mapper-event-1.02.39-1.el5
[root@web2 multipath]#
2、安裝
1
2
3
4
5
6
rpm -ivh device-mapper-1.02.39-1.el5.rpm #安裝映射包
rpm -ivh device-mapper-multipath-0.4.7-34.el5.rpm #安裝多路徑包
外加加入開機啟動
chkconfig --level 2345 multipathd on #設置成開機自啟動multipathd
lsmod |grep dm_multipath #來檢查安裝是否正常
3、配置
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# on the default devices.
blacklist {
devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
devnode "^hd[a-z]"
}
devices {
device {
vendor "HP"
path_grouping_policy multibus
features "1 queue_if_no_path"
path_checker readsector0
failback immediate
}
}
完整的配置如下:
blacklist {
devnode "^sda"
}
defaults {
user_friendly_names no
}
multipaths {
multipath {
wwid
alias iscsi-dm0
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
multipath {
wwid
alias iscsi-dm1
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
multipath {
wwid
alias iscsi-dm2
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
multipath {
wwid
alias iscsi-dm3
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
}
devices {
device {
vendor "iSCSI-Enterprise"
product "Virtual disk"
path_grouping_policy multibus
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
path_checker readsector0
path_selector "round-robin 0"
}
}
4、命令
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
[root@web2 ~]# multipath -h
multipath-tools v0.4.7 (03/12, 2006)
Usage: multipath [-v level] [-d] [-h|-l|-ll|-f|-F|-r]
[-p failover|multibus|group_by_serial|group_by_prio]
[device]
-v level verbosity level
0 no output
1 print created devmap names only
2 default verbosity
3 print debug information
-h print this usage text
-b file bindings file location
-d dry run, do not create or update devmaps
-l show multipath topology (sysfs and DM info)
-ll show multipath topology (maximum info)
-f flush a multipath device map
-F flush all multipath device maps
-r force devmap reload
-p policy force all maps to specified policy :
failover 1 path per priority group
multibus all paths in 1 priority group
group_by_serial 1 priority group per serial
group_by_prio 1 priority group per priority lvl
group_by_node_name 1 priority group per target node
device limit scope to the device's multipath
(udev-style $DEVNAME reference, eg /dev/sdb
or major:minor or a device map name)
[root@web2 ~]#
5、啟動關閉
1
2
3
4
# /etc/init.d/multipathd start #開啟mulitipath服務
service multipathd start
service multipathd restart
service multipathd stop
6、如何獲取wwid
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
1、
[root@vxfs01 ~]# cat /var/lib/multipath/bindings
# Multipath bindings, Version : 1.0
# NOTE: this file is automatically maintained by the multipath program.
# You should not need to edit this file in normal circumstances.
#
# Format:
# alias wwid
#
mpath0
mpath1
mpath2
mpath3
mpath4
2、
[root@vxfs01 ~]# multipath -v3 |grep 3600
sdb: uid = (callout)
sdc: uid = (callout)
sdd: uid = (callout)
sde: uid = (callout)
1:0:0:0 sdb 8:16 0 [undef][ready] DGC,RAI
1:0:1:0 sdc 8:32 1 [undef][ready] DGC,RAI
2:0:0:0 sdd 8:48 1 [undef][ready] DGC,RAI
2:0:1:0 sde 8:64 0 [undef][ready] DGC,RAI
Found matching wwid [] in bindings file.
比較詳細的文字:
http://zhumeng8337797.blog.163.com/blog/static/1007689142013416111534352/
http://blog.csdn.net/wuweilong/article/details/14184097
RHEL官網資料:
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-5-DM_Multipath-en-US.pdf
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-5-DM_Multipath-zh-CN.pdf
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-6-DM_Multipath-en-US.pdf
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-6-DM_Multipath-zh-CN.pdf
㈡ RH linux 5.9 下做多路徑聚合 問題,不知道怎樣才算配置成功,路徑聚合成功 (存儲是HP EVA4400+ )
應該是配置成功了,樓上的網友回答得相當專業。
一般情況下,Linux配完multipath後用fdisk -l查看,重復的磁碟還是能看得到。應該看到的是n多個/dev/sd*和後來生出來的/dev/dm-*(與mpath*分別對應)。這點與Windows上有所不用,我記得Windows上配完多路徑後是看不到重復的盤了。
你存儲上有7個LUN的話,那應該是7個dm-*才對啊,怎麼有8個呢?不解。。。
還有要注意的是:應該使用/dev/mapper/mpath*(multipath虛擬出來的多路徑設備),對它進行分區等操作。/dev/dm-* 是軟體內部自身使用的,不要用。
㈢ Linux多路徑配置
如果使用了多路徑棚臘野方案,可以直接使用multipath綁定設備名鏈喊不需要用到asmlib或UDEV請直接參考文檔:Configuringnon-(11.1.0,11.2.0)onRHEL5/OL5[ID605828.1][root@vrh1~]#foriin`cat/proc/partitions|awk'局橋{print$4}'|grepsd|grep[a-z]$`;doecho"###$i:`scsi_id-g-u-s/block/$i`";done###sda:SATA_VBOX_HARDDISK_VB83d4445f-b8790695_###sdb:SATA_VBOX_HARDDISK_VB0db2f233-269850e0_###sdc:SATA_VBOX_HARDDISK_VBa56f2571-0dd27b33_###sdd:SATA_VBOX_HARDDISK_VBf6b74ff7-871d1de8_###sde:SATA_VBOX_HARDDISK_VB5a531910-25f4eb9a_###sdf:SATA_VBOX_HARDDISK_VB4915e6e3-737b312e_###sdg:SATA_VBOX_HARDDISK_VB512c8f75-37f4a0e9_###sdh:SATA_VBOX_HARDDISK_VBc0115ef6-a48bc15d_###sdi:SATA_VBOX_HARDDISK_VB3a556907-2b72391d_###sdj:SATA_VBOX_HARDDISK_VB7ec8476c-08641bd4_###sdk:SATA_VBOX_HARDDISK_VB743e1567-d0009678_[root@vrh1~]#grep-v^#/etc/multipath.confdefaults{user_friendly_namesyes}defaults{udev_dir/devpolling_interval10selector"round-robin0"path_grouping_policyfailovergetuid_callout"/sbin/scsi_id-g-u-s/block/%n"prio_callout/bin/truepath_checkerreadsector0rr_min_io100rr_#no_path_retryfailuser_friendly_nameyes}devnode_blacklist{devnode"^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"devnode"^hd[a-z]"devnode"^cciss!c[0-9]d[0-9]*"}multipaths{multipath{wwidSATA_VBOX_HARDDISK_VB0db2f233-269850e0_aliasvoting1path_grouping_policyfailover}multipath{wwidSATA_VBOX_HARDDISK_VBa56f2571-0dd27b33_aliasvoting2path_grouping_policyfailover}multipath{wwidSATA_VBOX_HARDDISK_VBf6b74ff7-871d1de8_aliasvoting3path_grouping_policyfailover}multipath{wwidSATA_VBOX_HARDDISK_VB5a531910-25f4eb9a_aliasocr1path_grouping_policyfailover}multipath{wwidSATA_VBOX_HARDDISK_VB4915e6e3-737b312e_aliasocr2path_grouping_policyfailover}multipath{wwidSATA_VBOX_HARDDISK_VB512c8f75-37f4a0e9_aliasocr3path_grouping_policyfailover}}[root@vrh1~]#multipath[root@vrh1~]#multipath-llmpath2(SATA_VBOX_HARDDISK_VB3a556907-2b72391d_)dm-9ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-8:0:0:0sdi8:128activereadyrunningmpath1(SATA_VBOX_HARDDISK_VBc0115ef6-a48bc15
d_)dm-8ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-7:0:0:0sdh8:112activereadyrunningocr3(SATA_VBOX_HARDDISK_VB512c8f75-37f4a0e9_)dm-7ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-6:0:0:0sdg8:96activereadyrunningocr2(SATA_VBOX_HARDDISK_VB4915e6e3-737b312e_)dm-6ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-5:0:0:0sdf8:80activereadyrunningocr1(SATA_VBOX_HARDDISK_VB5a531910-25f4eb9a_)dm-5ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-4:0:0:0sde8:64activereadyrunningvoting3(SATA_VBOX_HARDDISK_VBf6b74ff7-871d1de8_)dm-4ATA,VBOXHARDDISKsize=40Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-3:0:0:0sdd8:48activereadyrunningvoting2(SATA_VBOX_HARDDISK_VBa56f2571-0dd27b33_)dm-3ATA,VBOXHARDDISKsize=40Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-2:0:0:0sdc8:32activereadyrunningvoting1(SATA_VBOX_HARDDISK_VB0db2f233-269850e0_)dm-2ATA,VBOXHARDDISKsize=40Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-1:0:0:0sdb8:16activereadyrunningmpath4(SATA_VBOX_HARDDISK_VB743e1567-d0009678_)dm-11ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-10:0:0:0sdk8:160activereadyrunningmpath3(SATA_VBOX_HARDDISK_VB7ec8476c-08641bd4_)dm-10ATA,VBOXHARDDISKsize=5.0Gfeatures='0'hwhandler='0'wp=rw`-+-policy='round-robin0'prio=1status=active`-9:0:0:0sdj8:144activereadyrunning[root@vrh1~]#dmsetupls|sortmpath1(253,8)mpath2(253,9)mpath3(253,10)mpath4(253,11)ocr1(253,5)ocr2(253,6)ocr3(253,7)VolGroup00-LogVol00(253,0)VolGroup00-LogVol01(253,1)voting1(253,2)voting2(253,3)voting3(253,4)[root@vrh1~]#ls-l/dev/mapper/*crw-------1rootroot10,62Oct1709:58/dev/mapper/controlbrw-rw----1rootdisk253,8Oct1900:11/dev/mapper/mpath1brw-rw----1rootdisk253,9Oct1900:11/dev/mapper
/mpath2brw-rw----1rootdisk253,10Oct1900:11/dev/mapper/mpath3brw-rw----1rootdisk253,11Oct1900:11/dev/mapper/mpath4brw-rw----1rootdisk253,5Oct1900:11/dev/mapper/ocr1brw-rw----1rootdisk253,6Oct1900:11/dev/mapper/ocr2brw-rw----1rootdisk253,7Oct1900:11/dev/mapper/ocr3brw-rw----1rootdisk253,0Oct1709:58/dev/mapper/VolGroup00-LogVol00brw-rw----1rootdisk253,1Oct1709:58/dev/mapper/VolGroup00-LogVol01brw-rw----1rootdisk253,2Oct1900:11/dev/mapper/voting1brw-rw----1rootdisk253,3Oct1900:11/dev/mapper/voting2brw-rw----1rootdisk253,4Oct1900:11/dev/mapper/voting3[root@vrh1~]#ls-l/dev/dm*brw-rw----1rootroot253,0Oct1709:58/dev/dm-0brw-rw----1rootroot253,1Oct1709:58/dev/dm-1brw-rw----1rootroot253,10Oct1900:11/dev/dm-10brw-rw----1rootroot253,11Oct1900:11/dev/dm-11brw-rw----1rootroot253,2Oct1900:11/dev/dm-2brw-rw----1rootroot253,3Oct1900:11/dev/dm-3brw-rw----1rootroot253,4Oct1900:11/dev/dm-4brw-rw----1rootroot253,5Oct1900:11/dev/dm-5brw-rw----1rootroot253,6Oct1900:11/dev/dm-6brw-rw----1rootroot253,7Oct1900:11/dev/dm-7brw-rw----1rootroot253,8Oct1900:11/dev/dm-8brw-rw----1rootroot253,9Oct1900:11/dev/dm-9[root@vrh1~]#ls-l/dev/disk/by-id/:58scsi-SATA_VBOX_HARDDISK_VB0db2f233-269850e0->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB3a556907-2b72391d->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB4915e6e3-737b312e->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB512c8f75-37f4a0e9->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB5a531910-25f4eb9a->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB743e1567-d0009678->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB7ec8476c-08641bd4->../../asm-:58scsi-SATA_VBOX_HARDDISK_VB83d4445f-b8790695->../../:58scsi-SATA_VBOX_HARDDISK_VB83d4445f-b8790695-part1->../../:58scsi-SATA_VBOX_HARDDISK_VB83d4445f-b8790695-part2->../../:58scsi-SATA_VBOX_HARDDISK_VBa56f2571-0dd27b33->../../asm-:58scsi-SATA_VBOX_HARDDISK_VBc0115ef6-a48bc15d->../../asm-:58scsi-SATA_VBOX_HARDDISK_VBf6b74ff7-871d1de8->../../asm-diskdReportAbuseLike(0)2.Re:asm磁碟使用鏈路聚合設備名,IO性能只有非聚合設備的1/6!LiuMaclean(劉相兵)ExpertLiuMaclean(劉相兵)Jul21,201311:09AM(inr
esponseto13628)step1:[oracle@vrh8mapper]$cat/etc/multipath.confmultipaths{multipath{wwidSATA_VBOX_HARDDISK_VBf6b74ff7-871d1de8_aliasasm-disk1mode660uid501gid503}multipath{wwidSATA_VBOX_HARDDISK_VB0db2f233-269850e0_aliasasm-disk2mode660uid501gid503}multipath{wwidSATA_VBOX_HARDDISK_VBa56f2571-0dd27b33_aliasasm-disk3mode660uid501gid503}}第二步:第三步:[oracle@vrh8mapper]$ls-l/dev/mapper/asm-disk*brw-rw----1gridasmadmin253,4Jul2107:02/dev/mapper/asm-disk1brw-rw----1gridasmadmin253,2Jul2107:02/dev/mapper/asm-disk2brw-rw----1gridasmadmin253,3Jul2107:02/dev/mapper/asm-disk3
㈣ linux 多路徑存儲是怎麼回事
Linux下HDS存儲多路徑查看
在Redhat下確定需要劃分的存儲空間。在本例中需要進行劃分的空間是從HDS AMS2000上劃分到伺服器的多路徑存儲空間。其中sddlmad為ycdb1上需要進行劃分的空間,sddlmah為ycdb2上需要進行劃分的空間。具體如下:
查看環境
# rpm -qa|grep device-mapper
device-mapper-event-1.02.32-1.el5
device-mapper-multipath-0.4.7-30.el5
device-mapper-1.02.32-1.el5
# rpm -qa|grep lvm2 lvm2-2.02.46-8.el5
查看空間
#fdisk -l
Disk /dev/sddlmad: 184.2 GB, 184236900352 bytes 255 heads, 63 sectors/track, 22398 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sddlmah: 184.2 GB, 184236900352 bytes
255 heads, 63 sectors/track, 22398 cylinders Units = cylinders of 16065 * 512 = 8225280 bytes
查看存儲
#cd /opt/DynamicLinkManager/bin/
#./dlnkmgr view -lu
Product : AMS
SerialNumber : 83041424 LUs : 8
iLU HDevName Device PathID Status
0000 sddlmaa /dev/sdb 000000 Online
/dev/sdj 000008 Online
/dev/sdr 000016 Online
/dev/sdz 000017 Online
0001 sddlmab /dev/sdc 000001 Online
/dev/sdk 000009 Online
/dev/sds 000018 Online
/dev/sdaa 000019 Online
0002 sddlmac /dev/sdd 000002 Online
/dev/sdl 000010 Online
/dev/sdt 000020 Online
/dev/sdab 000021 Online
0003 sddlmad /dev/sde 000003 Online
/dev/sdm 000011 Online
/dev/sdu 000022 Online
/dev/sdac 000023 Online
0004 sddlmae /dev/sdf 000004 Online
/dev/sdn 000012 Online
/dev/sdv 000024 Online
/dev/sdad 000025 Online
0005 sddlmaf /dev/sdg 000005 Online
/dev/sdo 000013 Online
/dev/sdw 000026 Online
/dev/sdae 000027 Online
0006 sddlmag /dev/sdh 000006 Online
/dev/sdp 000014 Online
/dev/sdx 000028 Online
/dev/sdaf 000029 Online
0007 sddlmah /dev/sdi 000007 Online
/dev/sdq 000015 Online
/dev/sdy 000030 Online
/dev/sdag 000031 Online
##############################################################
4. lvm.conf的修改
為了能夠正確的使用LVM,需要修改其過濾器:
#cd /etc/lvm #vi lvm.conf
# By default we accept every block device
# filter = [ "a/.*/" ]
filter = [ "a|sddlm[a-p][a-p]|", "r|/dev/sd|" ]
例:
[root@bsrunbak etc]# ls -l lvm*
[root@bsrunbak etc]# cd lvm
[root@bsrunbak lvm]# ls
archive backup cache lvm.conf
[root@bsrunbak lvm]# more lvm.conf
[root@bsrunbak lvm]# pvs
Last login: Fri Jul 10 11:17:21 2015 from 172.17.99.198
[root@bsrunserver1 ~]#
[root@bsrunserver1 ~]#
[root@bsrunserver1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda4 30G 8.8G 20G 32% /
tmpfs 95G 606M 94G 1% /dev/shm
/dev/sda2 194M 33M 151M 18% /boot
/dev/sda1 200M 260K 200M 1% /boot/efi
/dev/mapper/datavg-oraclelv
50G 31G 17G 65% /oracle
172.16.110.25:/Tbackup
690G 553G 102G 85% /Tbackup
/dev/mapper/tmpvg-oradatalv
345G 254G 74G 78% /oradata
/dev/mapper/datavg-lvodc
5.0G 665M 4.1G 14% /odc
[root@bsrunserver1 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda5 datavg lvm2 a-- 208.06g 153.06g
/dev/sddlmba tmpvg lvm2 a-- 200.00g 49.99g
/dev/sddlmbb tmpvg lvm2 a-- 200.00g 0
[root@bsrunserver1 ~]# cd /etc/lvm
[root@bsrunserver1 lvm]# more lvm.conf
# Don't have more than one filter line active at once: only one gets
used.
# Run vgscan after you change this parameter to ensure that
# the cache file gets regenerated (see below).
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
# By default we accept every block device:
# filter = [ "a/.*/" ]
# Exclude the cdrom drive
# filter = [ "r|/dev/cdrom|" ]
# When testing I like to work with just loopback devices:
# filter = [ "a/loop/", "r/.*/" ]
# Or maybe all loops and ide drives except hdc:
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
# Use anchors if you want to be really specific
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
filter = [ "a|/dev/sddlm.*|", "a|^/dev/sda5$|", "r|.*|" ]
[root@bsrunserver1 lvm]# df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/sda4 30963708 9178396 20212448 32% /
tmpfs 99105596 620228 98485368 1% /dev/shm
/dev/sda2 198337 33546 154551 18% /boot
/dev/sda1 204580 260 204320 1% /boot/efi
/dev/mapper/datavg-oraclelv
51606140 31486984 17497716 65% /oracle
172.16.110.25:/Tbackup
722486368 579049760 106736448 85% /Tbackup
/dev/mapper/tmpvg-oradatalv
361243236 266027580 76865576 78% /oradata
/dev/mapper/datavg-lvodc
5160576 680684 4217748 14% /odc
[root@bsrunserver1 lvm]#
You have new mail in /var/spool/mail/root
[root@bsrunserver1 lvm]#
[root@bsrunserver1 lvm]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda5 datavg lvm2 a-- 208.06g 153.06g
/dev/sddlmba tmpvg lvm2 a-- 200.00g 49.99g
/dev/sddlmbb tmpvg lvm2 a-- 200.00g 0
[root@bsrunserver1 lvm]#
進入文件:
[root@bsrunbak lvm]# cd /opt/D*/bin
or
[root@bsrunbak bin]# pwd
/opt/DynamicLinkManager/bin
顯示HDS存儲卷:
[root@bsrunbak lvm]# ./dlnkmgr view -lu