Oracle ASM multipath udev binding setup
1. Install the device-mapper-multipath package
# rpm -qa | grep device-mapper-multipath
# yum install -y device-mapper-multipath
2. Enable the multipathd service at boot
# systemctl enable multipathd.service
# systemctl list-unit-files | grep multipath
3. Confirm the multipath kernel modules are loaded
# lsmod | grep dm_multipath
-- Load the modules if they are not listed
# modprobe dm-multipath
# modprobe dm-round-robin
# lsmod | grep dm_multipath
dm_multipath 27792 14 dm_round_robin,dm_service_time
dm_mod 124501 45 dm_multipath,dm_log,dm_mirror
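(Optional) multipathd normally pulls these modules in on its own; if you also want them loaded explicitly at every boot, a modules-load.d entry is one way to do it (the file name below is just an example):
# cat /etc/modules-load.d/dm-multipath.conf
dm-multipath
dm-round-robin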
4. Generate the multipath configuration and start the service
# mpathconf --enable
# systemctl start multipathd.service
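(Optional) To double-check that the base configuration was created and the daemon is up, mpathconf with no arguments prints the current setup and systemctl shows the service state:
# mpathconf
# systemctl status multipathd.service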
5. Check the WWIDs
# cat /etc/multipath/bindings
mpatha 1VMware_VITDEVIDeacb8861a5ab421836115cb9018f9f10
mpathb 1VMware_VITDEVIDfbcb8861ccc5847e064e5cb9018f9c04
mpathc 1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8
mpathd 1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34
mpathe 1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34
mpathf 1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8
mpathg 1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04
mpathh 1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8
mpathi 1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8
mpathj 1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8
mpathk 1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8
mpathl 1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34
mpathm 1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10
mpathn 1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04
mpatho 3600140525608a5ab3ca41bebab2b46e9
# multipath -ll
mpathe (1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34) dm-14 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:2 sdk 8:160 active ready running
mpathd (1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34) dm-13 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:1 sdj 8:144 active ready running
mpathc (1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8) dm-12 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:0 sdi 8:128 active ready running
mpathn (1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04) dm-11 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:5 sdh 8:112 active ready running
mpathm (1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10) dm-10 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:4 sdg 8:96 active ready running
mpathl (1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34) dm-9 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:3 sdf 8:80 active ready running
mpathk (1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8) dm-8 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:2 sde 8:64 active ready running
mpathj (1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8) dm-7 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:1 sdd 8:48 active ready running
mpathi (1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8) dm-6 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:0 sdc 8:32 active ready running
mpathh (1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8) dm-17 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:5 sdn 8:208 active ready running
mpathg (1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04) dm-16 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:4 sdm 8:192 active ready running
mpathf (1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8) dm-15 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:3 sdl 8:176 active ready running
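(Optional) If you only need the WWID of a single device rather than the whole bindings file, scsi_id can query it directly (the device name is just an example); the value returned should match the corresponding entry in /etc/multipath/bindings:
# /usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sdc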
6. Configure the multipath.conf file
# vim /etc/multipath.conf
multipaths {
    multipath {
        wwid  1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34
        alias FRA01_1
    }
    multipath {
        wwid  1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8
        alias FRA02_1
    }
    multipath {
        wwid  1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8
        alias DATA01_1
    }
    multipath {
        wwid  1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34
        alias DATA02_1
    }
    multipath {
        wwid  1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04
        alias OCR01_1
    }
    multipath {
        wwid  1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8
        alias OCR02_1
    }
    multipath {
        wwid  1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8
        alias DATA01_2
    }
    multipath {
        wwid  1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8
        alias DATA02_2
    }
    multipath {
        wwid  1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8
        alias FRA01_2
    }
    multipath {
        wwid  1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34
        alias FRA02_2
    }
    multipath {
        wwid  1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10
        alias OCR01_2
    }
    multipath {
        wwid  1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04
        alias OCR02_2
    }
}
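(Optional) A full restart is not strictly required; the running daemon can also re-read the edited file in place through its interactive console (the next step uses a restart, which works just as well):
# multipathd -k"reconfigure"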
7. Restart the service and confirm the new names are in effect
# systemctl restart multipathd.service
# multipath -ll
OCR01_2 (1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10) dm-15 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:4 sdm 8:192 active ready running
OCR02_2 (1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04) dm-17 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:5 sdo 8:224 active ready running
OCR01_1 (1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04) dm-13 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:4 sdl 8:176 active ready running
OCR02_1 (1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8) dm-16 VMware ,Virtual SAN
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:5 sdn 8:208 active ready running
DATA01_2 (1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8) dm-6 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:0 sdd 8:48 active ready running
DATA02_2 (1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8) dm-9 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:1 sdf 8:80 active ready running
DATA01_1 (1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8) dm-7 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:0 sdc 8:32 active ready running
DATA02_1 (1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34) dm-8 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:1 sdg 8:96 active ready running
FRA01_2 (1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8) dm-11 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:2 sdi 8:128 active ready running
FRA02_2 (1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34) dm-14 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 34:0:0:3 sdk 8:160 active ready running
FRA01_1 (1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34) dm-10 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:2 sdh 8:112 active ready running
FRA02_1 (1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8) dm-12 VMware ,Virtual SAN
size=100G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 33:0:0:3 sdj 8:144 active ready running
# ls -ltr /dev/mapper/
total 0
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 FRA02_1 -> ../dm-12
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 OCR02_2 -> ../dm-17
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 OCR01_1 -> ../dm-13
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 FRA02_2 -> ../dm-14
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 OCR01_2 -> ../dm-15
lrwxrwxrwx. 1 root root 7 Feb 21 17:42 DATA01_1 -> ../dm-7
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 OCR02_1 -> ../dm-16
lrwxrwxrwx. 1 root root 7 Feb 21 17:42 DATA02_1 -> ../dm-8
lrwxrwxrwx. 1 root root 7 Feb 21 17:42 DATA02_2 -> ../dm-9
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 FRA01_1 -> ../dm-10
lrwxrwxrwx. 1 root root 8 Feb 21 17:42 FRA01_2 -> ../dm-11
lrwxrwxrwx. 1 root root 7 Feb 21 17:42 DATA01_2 -> ../dm-6
8. Create the ASM udev rules file under /etc/udev/rules.d (the extension must be .rules, otherwise udev will not read it)
# vim /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVIDa644b861306d2fc75d4c5cb9018f9f34", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="FRA01_1"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVIDb844b861afb20fda3e455cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="FRA02_1"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVIDd343b861ad1d078f28ab5cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="DATA01_1"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID5a44b861d4956f0fcad65cb9018f9f34", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="DATA02_1"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID4445b86124db439f17cd5cb9018f9c04", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="OCR01_1"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID6e45b861d1723d02371b5cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="OCR02_1"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID7593ba61198cc8b778b45cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="DATA01_2"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID8193ba61836cdd0166445cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="DATA02_2"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID8f93ba613b94159ebfc05cb9018f9fd8", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="FRA01_2"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID9d93ba613e0cd453b9915cb9018f9f34", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="FRA02_2"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVIDf993ba61bb9a711d93df5cb9018f9f10", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="OCR01_2"
KERNEL=="dm-*", ENV{DM_UUID}=="mpath-1VMware_VITDEVID0994ba61f501edaac8c55cb9018f9c04", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="OCR02_2"
KERNEL=="sd*", ENV{ID_SERIAL}=="3600140525608a5ab3ca41bebab2b46e9", OWNER="grid", GROUP="asmadmin", MODE="0660", SYMLINK+="OCR01_3"
9. Reload the udev rules
# /sbin/udevadm control --reload-rules
# /sbin/udevadm trigger
10. Verify that the rules took effect
# ls -l /dev/{DATA,OCR,FRA}*_*
lrwxrwxrwx. 1 root root 4 Feb 22 10:09 /dev/DATA01_1 -> dm-7
lrwxrwxrwx. 1 root root 4 Feb 22 10:09 /dev/DATA01_2 -> dm-6
lrwxrwxrwx. 1 root root 4 Feb 22 10:09 /dev/DATA02_1 -> dm-8
lrwxrwxrwx. 1 root root 4 Feb 22 10:09 /dev/DATA02_2 -> dm-9
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/FRA01_1 -> dm-10
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/FRA01_2 -> dm-11
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/FRA02_1 -> dm-12
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/FRA02_2 -> dm-14
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/OCR01_1 -> dm-13
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/OCR01_2 -> dm-15
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/OCR02_1 -> dm-16
lrwxrwxrwx. 1 root root 5 Feb 22 10:09 /dev/OCR02_2 -> dm-17
# ls -l /dev/dm-*
brw-rw----. 1 root disk 253, 0 Feb 22 10:09 /dev/dm-0
brw-rw----. 1 root disk 253, 1 Feb 22 10:09 /dev/dm-1
brw-rw----. 1 grid asmadmin 253, 10 Feb 22 10:09 /dev/dm-10
brw-rw----. 1 grid asmadmin 253, 11 Feb 22 10:09 /dev/dm-11
brw-rw----. 1 grid asmadmin 253, 12 Feb 22 10:09 /dev/dm-12
brw-rw----. 1 grid asmadmin 253, 13 Feb 22 10:09 /dev/dm-13
brw-rw----. 1 grid asmadmin 253, 14 Feb 22 10:09 /dev/dm-14
brw-rw----. 1 grid asmadmin 253, 15 Feb 22 10:09 /dev/dm-15
brw-rw----. 1 grid asmadmin 253, 16 Feb 22 10:09 /dev/dm-16
brw-rw----. 1 grid asmadmin 253, 17 Feb 22 10:09 /dev/dm-17
brw-rw----. 1 root disk 253, 2 Feb 22 10:09 /dev/dm-2
brw-rw----. 1 root disk 253, 3 Feb 22 10:09 /dev/dm-3
brw-rw----. 1 root disk 253, 4 Feb 22 10:09 /dev/dm-4
brw-rw----. 1 root disk 253, 5 Feb 22 10:09 /dev/dm-5
brw-rw----. 1 grid asmadmin 253, 6 Feb 22 10:09 /dev/dm-6
brw-rw----. 1 grid asmadmin 253, 7 Feb 22 10:09 /dev/dm-7
brw-rw----. 1 grid asmadmin 253, 8 Feb 22 10:09 /dev/dm-8
brw-rw----. 1 grid asmadmin 253, 9 Feb 22 10:09 /dev/dm-9
11. Copy multipath.conf to the other node and restart its multipathd service
# scp /etc/multipath.conf root@oraext2:/etc/
# ssh oraext2 systemctl restart multipathd.service
12. Copy 99-oracle-asmdevices.rules to the other node and reload its udev rules
# scp /etc/udev/rules.d/99-oracle-asmdevices.rules oraext2:/etc/udev/rules.d/
# ssh oraext2 udevadm control --reload-rules
# ssh oraext2 udevadm trigger
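(Optional) With more than one additional node, a small loop avoids repeating steps 11 and 12 by hand (the node names are just examples):
# for node in oraext2 oraext3; do scp /etc/multipath.conf root@$node:/etc/; scp /etc/udev/rules.d/99-oracle-asmdevices.rules root@$node:/etc/udev/rules.d/; ssh root@$node 'systemctl restart multipathd.service; udevadm control --reload-rules; udevadm trigger'; done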
These notes are now maintained on Blogger:
https://slowlife-notes.blogspot.com