  • RAC: changing the public IP, VIP, private IP, and SCAN IP

    [grid@rac1 ~]$ oifcfg getif
    eth0 192.168.220.0 global public
    eth1 192.168.11.0 global cluster_interconnect
    [grid@rac1 ~]$ oifcfg iflist
    eth0 192.168.220.0
    eth1 192.168.11.0
    eth1 169.254.0.0
    [grid@rac1 ~]$ cat /etc/hosts
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.220.130 rac1
    192.168.220.140 rac2
    192.168.220.135 rac1-vip
    192.168.220.145 rac2-vip
    192.168.11.11 rac1-priv
    192.168.11.22 rac2-priv
    192.168.220.150 rac-scan


    The updated /etc/hosts (new addresses):
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.110.11 rac1
    192.168.110.22 rac2
    192.168.110.111 rac1-vip
    192.168.110.222 rac2-vip
    10.10.11.30 rac1-priv
    10.10.11.40 rac2-priv
    192.168.110.50 rac-scan
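
    The same updated /etc/hosts must be in place on every node. A minimal sketch for pushing it to the second node, assuming root SSH access between the nodes:
    scp /etc/hosts rac2:/etc/hosts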

    Changing the private (interconnect) IP
    ---- The private IP must be modified while the cluster is up, and the cluster then restarted.
    Confirm that CRS is started on all nodes:
    [root@rac2 grid]# olsnodes -s
    rac1 Active
    rac2 Active
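
    As an additional check, the health of the whole stack (CRS, CSS, EVM) on all nodes can be confirmed:
    crsctl check cluster -all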

    -- Check the current settings:
    [root@rac2 grid]# oifcfg getif
    eth0 192.168.220.0 global public
    eth1 192.168.11.0 global cluster_interconnect
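
    Since oifcfg writes its changes into the OCR, it is prudent to take a manual OCR backup first (as root):
    ocrconfig -manualbackup
    ocrconfig -showbackup manual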

    [root@rac1 grid]# oifcfg setif -global eth1/'10.10.11.0':cluster_interconnect
    [root@rac1 grid]# oifcfg getif
    eth0 192.168.220.0 global public
    eth1 192.168.11.0 global cluster_interconnect
    eth1 10.10.11.0 global cluster_interconnect

    srvctl stop database -d racdb
    srvctl stop listener
    crsctl stop crs
    Note: when CRS is started again after this, the listener and the database will not be started automatically.


    Change the NIC IP addresses (edit the ifcfg files for both eth0 and eth1, on both nodes):
    vi /etc/sysconfig/network-scripts/ifcfg-eth0
    service network restart
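
    For reference, a sketch of what the updated ifcfg-eth0 on rac1 might contain (everything other than IPADDR/NETMASK is an assumption; adjust to the actual environment):
    DEVICE=eth0
    BOOTPROTO=static
    ONBOOT=yes
    IPADDR=192.168.110.11
    NETMASK=255.255.255.0
    GATEWAY=192.168.110.1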

    [root@rac1 grid]# ifconfig
    eth0 Link encap:Ethernet HWaddr 08:00:27:59:BF:67
    inet addr:192.168.110.11 Bcast:192.168.110.255 Mask:255.255.255.0
    inet6 addr: fe80::a00:27ff:fe59:bf67/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:5375 errors:0 dropped:0 overruns:0 frame:0
    TX packets:3116 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:644308 (629.2 KiB) TX bytes:522464 (510.2 KiB)

    eth1 Link encap:Ethernet HWaddr 08:00:27:BA:70:95
    inet addr:10.10.11.30 Bcast:10.10.11.255 Mask:255.255.255.0
    inet6 addr: fe80::a00:27ff:feba:7095/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:86353 errors:0 dropped:0 overruns:0 frame:0
    TX packets:108711 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:50509157 (48.1 MiB) TX bytes:81741534 (77.9 MiB)
    [root@rac2 grid]# ifconfig
    eth0 Link encap:Ethernet HWaddr 08:00:27:E5:AD:8D
    inet addr:192.168.110.22 Bcast:192.168.110.255 Mask:255.255.255.0
    inet6 addr: fe80::a00:27ff:fee5:ad8d/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:4150 errors:0 dropped:0 overruns:0 frame:0
    TX packets:2677 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:507964 (496.0 KiB) TX bytes:425725 (415.7 KiB)

    eth1 Link encap:Ethernet HWaddr 08:00:27:1B:45:6F
    inet addr:10.10.11.40 Bcast:10.10.11.255 Mask:255.255.255.0
    inet6 addr: fe80::a00:27ff:fe1b:456f/64 Scope:Link
    UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
    RX packets:109860 errors:0 dropped:0 overruns:0 frame:0
    TX packets:84914 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:81839755 (78.0 MiB) TX bytes:50373028 (48.0 MiB)
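
    A quick connectivity check of the new addresses from rac1 (hostnames per the updated /etc/hosts):
    ping -c 3 rac2
    ping -c 3 rac2-priv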



    Start CRS; as expected, the database and listener are not started automatically:
    [root@rac1 grid]# crsctl start crs

    [root@rac1 grid]# crsctl stat res -t
    --------------------------------------------------------------------------------
    NAME TARGET STATE SERVER STATE_DETAILS
    --------------------------------------------------------------------------------
    Local Resources
    --------------------------------------------------------------------------------
    ora.DATADG.dg
    ONLINE ONLINE rac1
    ONLINE ONLINE rac2
    ora.LISTENER.lsnr
    OFFLINE OFFLINE rac1
    OFFLINE OFFLINE rac2
    ora.OCRDG.dg
    ONLINE ONLINE rac1
    ONLINE ONLINE rac2
    ora.asm
    ONLINE ONLINE rac1 Started
    ONLINE ONLINE rac2 Started
    ora.gsd
    OFFLINE OFFLINE rac1
    OFFLINE OFFLINE rac2
    ora.net1.network
    ONLINE OFFLINE rac1
    ONLINE OFFLINE rac2
    ora.ons
    ONLINE OFFLINE rac1
    ONLINE OFFLINE rac2
    ora.registry.acfs
    ONLINE ONLINE rac1
    ONLINE ONLINE rac2
    --------------------------------------------------------------------------------
    Cluster Resources
    --------------------------------------------------------------------------------
    ora.LISTENER_SCAN1.lsnr
    1 ONLINE OFFLINE
    ora.cvu
    1 ONLINE OFFLINE
    ora.oc4j
    1 ONLINE ONLINE rac1
    ora.rac1.vip
    1 ONLINE OFFLINE
    ora.rac2.vip
    1 ONLINE OFFLINE
    ora.racdb.db
    1 OFFLINE OFFLINE Instance Shutdown
    2 OFFLINE OFFLINE Instance Shutdown
    ora.scan1.vip
    1 ONLINE OFFLINE


    [root@rac1 grid]# oifcfg getif
    eth0 192.168.220.0 global public
    eth1 192.168.11.0 global cluster_interconnect
    eth1 10.10.11.0 global cluster_interconnect
    -- Delete the old interconnect entry (the new one was added above):
    [root@rac1 grid]# oifcfg delif -global eth1/'192.168.11.0'
    [root@rac1 grid]# oifcfg getif
    eth0 192.168.220.0 global public
    eth1 10.10.11.0 global cluster_interconnect

    ----- Changing the VIPs
    The VIPs are already stopped at this point (they went down with CRS); if they are still running, stop them:
    srvctl stop vip -n rac1
    srvctl stop vip -n rac2


    Check the VIP configuration. The outputs from rac2 still show the old subnet 192.168.220.0; the outputs from rac1 were captured after the nodeapps modification below:
    [root@rac2 grid]# srvctl config vip -n rac1
    VIP exists: /rac1-vip/192.168.110.111/192.168.220.0/255.255.255.0/eth0, hosting node rac1
    [root@rac2 grid]# srvctl config vip -n rac2
    VIP exists: /rac2-vip/192.168.110.222/192.168.220.0/255.255.255.0/eth0, hosting node rac2

    [root@rac1 grid]# srvctl config vip -n rac1
    VIP exists: /rac1-vip/192.168.110.111/192.168.110.0/255.255.255.0/eth0, hosting node rac1
    [root@rac1 grid]# srvctl config vip -n rac2
    VIP exists: /192.168.110.222/192.168.110.222/192.168.110.0/255.255.255.0/eth0, hosting node rac2
    [root@rac1 grid]#



    Modify the VIP settings (as root):
    srvctl modify nodeapps -n rac1 -A 192.168.110.111/255.255.255.0/eth0
    srvctl modify nodeapps -n rac2 -A 192.168.110.222/255.255.255.0/eth0


    Corresponding /etc/hosts entries:
    192.168.110.111 rac1-vip
    192.168.110.222 rac2-vip


    Start the VIPs:

    srvctl start vip -n rac1
    srvctl start vip -n rac2
    Start the listener:

    srvctl start listener
    Start the database:

    srvctl start database -d racdb
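
    Confirm that both instances came up:
    srvctl status database -d racdb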
    Verify the local_listener parameter:
    Check local_listener on each instance; if it is wrong it must be corrected (in this case it was already correct).

    -- Verify on both nodes:
    show parameter local_listener
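
    If local_listener still pointed at the old VIP, it could be corrected per instance; a sketch (the instance names racdb1/racdb2 are assumptions):
    alter system set local_listener='(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.110.111)(PORT=1521))' scope=both sid='racdb1';
    alter system set local_listener='(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.110.222)(PORT=1521))' scope=both sid='racdb2';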


    Changing the SCAN VIP

    Check the current SCAN VIP settings:
    [root@rac1 grid]# srvctl config scan
    SCAN name: rac-scan, Network: 1/192.168.110.0/255.255.255.0/eth0
    SCAN VIP name: scan1, IP: /rac-scan/192.168.220.150

    [grid@rac1 cssd]$ srvctl stop scan_listener
    [grid@rac1 cssd]$ srvctl stop scan

    [grid@rac1 cssd]$ srvctl status scan_listener
    SCAN Listener LISTENER_SCAN1 is enabled
    SCAN listener LISTENER_SCAN1 is not running
    [grid@rac1 cssd]$ srvctl status scan
    SCAN VIP scan1 is enabled
    SCAN VIP scan1 is not running

    Modify the SCAN VIP as the root user.
    Note: run the modification as root, and confirm that the /etc/hosts mapping has already been updated to the new SCAN IP.
    [root@rac1 grid]# srvctl modify scan -n rac-scan
    [root@rac1 grid]#

    [grid@rac1 cssd]$ srvctl modify scan_listener -u
    [grid@rac1 cssd]$

    [root@rac1 grid]# srvctl config scan
    SCAN name: rac-scan, Network: 1/192.168.110.0/255.255.255.0/eth0
    SCAN VIP name: scan1, IP: /rac-scan/192.168.110.50


    [root@rac1 grid]# srvctl config scan_listener
    SCAN Listener LISTENER_SCAN1 exists. Port: TCP:1521
    [root@rac1 grid]#
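
    Finally, start the SCAN VIP and SCAN listener again and confirm they are running:
    srvctl start scan
    srvctl start scan_listener
    srvctl status scan_listener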


    This completes the change; it is worth practicing this procedure to consolidate it.
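
    Optionally, as a last smoke test, connect through the new SCAN (the service name racdb and a suitable account are assumptions):
    sqlplus system@//rac-scan:1521/racdb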

  • Original post: https://www.cnblogs.com/nadian-li/p/13746940.html