  • Mycat 1.6.6.1 read/write splitting, sharding, keepalived high availability, and MySQL master-slave replication

    MySQL master-slave replication
    Environment
    MySQL 5.7 on CentOS 7; master host: 192.168.92.14, slave host: 192.168.92.16

    -- View the SQL recorded in the MySQL binlog
    mysqlbinlog --base64-output=decode-rows -v mysql-bin.000001 > /opt/binlog.sql

    -- Master configuration
    -- Edit the master config and enable the binlog
    vi /etc/my.cnf

    log-bin=mysql-bin
    server_id=1

    -- Restart MySQL
    systemctl restart mysqld
    mysql -uroot -poracle
    grant replication slave on *.* to 'root'@'192.168.92.16' identified by 'oracle';
    -- If the grant fails with a password policy error, disable password validation
    vi /etc/my.cnf
    validate_password=off
    -- Verify that log-bin is enabled
    show variables like '%log_bin%';

    show master status;
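
    -- Optional sanity check that the replication account exists with the expected privilege
    -- (account and host are the ones granted above):
    show grants for 'root'@'192.168.92.16';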

    -- Slave configuration
    -- If auto.cnf shows the same server_uuid on the master and the slave, regenerate the slave's server_uuid
    mysql -uroot -poracle
    show variables like '%server_uuid%';

    cd /var/lib/mysql
    more auto.cnf
    mv auto.cnf auto.cnf.bak
    systemctl restart mysqld

    vi /etc/my.cnf
    server_id=2

    systemctl restart mysqld

    mysql -uroot -poracle

    CHANGE MASTER TO
    MASTER_HOST='192.168.92.14',
    MASTER_USER='root',
    MASTER_PASSWORD='oracle';

    /* MASTER_PORT=3306,
    MASTER_LOG_FILE='mysql-bin.000001',
    MASTER_LOG_POS=213; */
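
    -- If you prefer to start replication from explicit coordinates, take File and Position from
    -- show master status on the master and pass them in. A sketch; the file name and position
    -- below are placeholders, not values captured from this environment:
    CHANGE MASTER TO
    MASTER_HOST='192.168.92.14',
    MASTER_USER='root',
    MASTER_PASSWORD='oracle',
    MASTER_PORT=3306,
    MASTER_LOG_FILE='mysql-bin.000001',
    MASTER_LOG_POS=213;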

    start slave;

    show slave status\G
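
    -- The fields to confirm are Slave_IO_Running and Slave_SQL_Running (both should be Yes)
    -- and Seconds_Behind_Master; a quick filter from the shell, assuming the same credentials:
    mysql -uroot -poracle -e "show slave status\G" | grep -E "Slave_IO_Running|Slave_SQL_Running|Seconds_Behind_Master"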

    -- If Slave_SQL_Running is No, fix it with:
    stop slave;
    set global sql_slave_skip_counter=1;
    start slave;
    show slave status\G

    -- Tear down replication on the slave
    stop slave;
    reset slave;

    -- Test
    -- On the master
    create database orderdb;
    create database userdb;

    create table orderdb.t_order(uuid int,name varchar(200),primary key(uuid));
    insert into orderdb.t_order select 1,'order1';
    insert into orderdb.t_order select 2,'order2';

    create table orderdb.company(uuid int,name varchar(200),primary key(uuid));
    insert into orderdb.company select 1,'order1';
    insert into orderdb.company select 2,'order2';

    use userdb;
    create table userdb.t_user(uuid int primary key,name varchar(200));
    insert into userdb.t_user select 1,'user1';

    select * from orderdb.t_order;
    select * from orderdb.company;
    select * from userdb.t_user;
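
    -- To confirm replication, run the same queries on the slave (192.168.92.16); the rows
    -- inserted on the master should appear there as well:
    mysql -uroot -poracle -e "select * from orderdb.t_order; select * from userdb.t_user;"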

    --------------------------------------------------------------------------------------------------------------
    -- Mycat 1.6.6.1 read/write splitting
    -- Grant privileges on both the master and the slave
    mysql -uroot -poracle
    grant all privileges on *.* to root@'%' identified by 'oracle';
    flush privileges;
    exit

    cd /opt
    rpm -ivh jdk-8u162-linux-x64.rpm
    tar -zxvf Mycat-server-1.6.6.1-release-20181031195535-linux.tar.gz

    vi /etc/profile

    export JAVA_HOME=/usr/java/latest
    #export CLASSPATH=.:$JAVA_HOME/lib
    #export PATH=$JAVA_HOME/bin:$PATH
    export CLASSPATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$CLASSPATH
    export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
    export MYCAT_HOME=/opt/mycat
    export PATH=$PATH:$MYCAT_HOME/bin

    source /etc/profile
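    -- A quick check that the JDK and MYCAT_HOME are picked up (paths assume the layout above):
    java -version
    echo $MYCAT_HOME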
    -- If the configuration is wrong, startup fails with: JVM did not exit on request, terminated
    -- The configuration below makes the company table writable on the master only, with no read routing
    vi schema.xml

    <?xml version="1.0"?>
    <!DOCTYPE mycat:schema SYSTEM "schema.dtd">
    <mycat:schema xmlns:mycat="http://io.mycat/">
        <schema name="mydb1" checkSQLschema="true" sqlMaxLimit="100">
            <table name="t_order" primaryKey="uuid" autoIncrement="true" dataNode="dn1"/>
            <table name="t_user" primaryKey="uuid" autoIncrement="true" dataNode="dn2"/>
            <table name="company" primaryKey="ID" type="global" dataNode="dn3" />
        </schema>
        
        <dataNode name="dn1" dataHost="dhost1" database="orderdb" />
        <dataNode name="dn2" dataHost="dhost1" database="userdb" />
        <dataNode name="dn3" dataHost="dhost2" database="userdb" />
        
        <dataHost name="dhost1" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql" dbDriver="native">
            <heartbeat>select user()</heartbeat>
            <writeHost host="myhostM1" url="192.168.92.14:3306" user="root" password="oracle">
                <readHost host="myhostS1" url="192.168.92.16:3306"
    user="root" password="oracle" weight="1" />
            </writeHost>
        </dataHost>
        <dataHost name="dhost2" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql" dbDriver="native">
            <heartbeat>select user()</heartbeat>
            <writeHost host="myhostM2" url="192.168.92.14:3306" user="root"    password="oracle"></writeHost>
        </dataHost>
    </mycat:schema>

    -- The final user configuration is shown below; comment out the extra Mycat schema TESTDB at the end

    vi server.xml

    <?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE mycat:server SYSTEM "server.dtd">
    <mycat:server xmlns:mycat="http://io.mycat/">
        <user name="root">
            <property name="password">123456</property>
            <property name="schemas">mydb1</property>
        </user>
    
        <user name="user">
            <property name="password">user</property>
            <property name="schemas">mydb1</property>
            <property name="readOnly">true</property>
        </user>
    
    </mycat:server>

    -- Mycat startup error
    Startup failed: Timed out waiting for a signal from the JVM.
    JVM did not exit on request, terminated
    Fix: add the setting below to wrapper.conf; in this case wrapper.startup.timeout was missing
    vi wrapper.conf   # add the following

    # startup timeout: 300 seconds
    wrapper.startup.timeout=300
    #wrapper.ping.timeout=120

    -- Change the log level to debug
    vi /opt/mycat/conf/log4j2.xml   # change info to debug
    <asyncRoot level="debug" includeLocation="true">

    <!--<AppenderRef ref="Console" />-->
    <AppenderRef ref="RollingFile"/>

    </asyncRoot>

    -- For reference, older versions used:
    <root>
    <level value="debug" />
    <appender-ref ref="FILE" />
    <!--<appender-ref ref="FILE" />-->
    </root>


    -- Changing the files under conf may prevent the Mycat service from starting
    -- Start Mycat (see also startup_nowrap.sh)

    mycat start &
    -- Navicat connects to Mycat on port 8066; the command-line equivalent is:
    mysql -uroot -h192.168.92.14 -P8066 -poracle
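
    -- Mycat also exposes a management port (9066 by default); a hedged example of checking the
    -- backend data sources and heartbeats from there, using the same account as above:
    mysql -uroot -h192.168.92.14 -P9066 -poracle -e "show @@datasource; show @@heartbeat;"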

    -- Watch the debug log; the lines below show the executed SQL and the node it ran on (the attachment keyword can also be used to locate it)
    tail -f /opt/mycat/logs/mycat.log

    to send query cmd:
    select user()
    in pool
    DBHostConfig [hostName=myhostM1, url=192.168.92.14:3306]

    -- Test read/write splitting from Navicat
    select * from orderdb.t_order;
    insert into orderdb.t_order select 16,'user16';
    -- Write-only: reads return an error
    select * from orderdb.company ;
    insert into orderdb.company select 6,'company6';
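    -- To confirm that selects really go to the read host, a simple check against the debug log
    -- (host names are the ones defined in schema.xml above):
    grep -c "myhostS1" /opt/mycat/logs/mycat.log
    grep -c "myhostM1" /opt/mycat/logs/mycat.log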
    ------------------------------------------------------------------------------------------
    -- keepalived HA configuration for the MySQL master and slave
    -- Disable SELinux
    vi /etc/selinux/config
    SELINUX=disabled

    cd /opt
    wget https://mirrors.edge.kernel.org/pub/linux/utils/kernel/ipvsadm/ipvsadm-1.29.tar.gz

    scp ipvsadm-1.29.tar.gz root@192.168.92.16:/opt/

    tar zxvf ipvsadm-1.29.tar.gz
    cd ipvsadm-1.29/
    -- The popt* packages are required, otherwise: error: ipvsadm.c:114:18: fatal error: popt.h: No such file or directory
    yum install -y popt-static libnl* libpopt* popt*
    make && make install
    -- Run ipvsadm once to load the ip_vs module into the kernel
    ipvsadm
    # Check the LVS version
    cat /proc/net/ip_vs

    -- keepalived 2.0.10 has many build dependencies; installing them takes a while
    yum install -y openssl-devel libnl3-devel ipset-devel iptables-devel libnfnetlink-devel gcc
    yum install -y make autoconf automake
    yum install -y file-devel
    yum install -y net-snmp-devel
    yum install -y glib2-devel
    yum install -y json-c-devel pcre2-devel libnftnl-devel libmnl-devel
    yum install -y python-sphinx epel-release python-sphinx_rtd_theme
    yum install -y latexmk texlive texlive-titlesec texlive-framed texlive-threeparttable texlive-wrapfig texlive-multirow

    wget https://www.keepalived.org/software/keepalived-2.0.10.tar.gz
    scp keepalived-2.0.10.tar.gz root@192.168.92.16:/opt/
    tar -xvzf keepalived-2.0.10.tar.gz
    cd keepalived-2.0.10
    # Install into /usr/local/keepalived
    ./configure --prefix=/usr/local/keepalived --sbindir=/usr/sbin/ --sysconfdir=/etc/ --mandir=/usr/local/share/man/
    make && make install
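
    -- Verify the build (the binary lands in /usr/sbin per the --sbindir option above):
    keepalived --version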

    -- After installation the configuration lives in /etc/keepalived/.
    cd /etc/keepalived
    mv keepalived.conf keepalived.conf.bak

    vi /etc/keepalived/keepalived.conf

    # Global definitions: failure notification e-mail settings
    global_defs {
        notification_email {
            root@localhost
        }
        notification_email_from root@along.com
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id cc_master   # node name
    }
    # VRRP instance section; VI_1 is a user-defined instance name and several instances may be defined
    # It declares the externally served VIP and its attributes
    vrrp_instance VI_1 {
        # initial state of this instance: MASTER|BACKUP
        state MASTER
        # NIC this instance binds to, used to send VRRP advertisements (election traffic)
        interface ens37
        # virtual router ID (must not exceed 255; all nodes of the same instance use the same ID)
        virtual_router_id 51
        # priority; the backup node must be lower
        priority 100
        # VRRP advertisement interval, in seconds
        advert_int 1
        # VIPs, one address per line, several allowed; with LVS they must match the VIP set on the LVS client side
        virtual_ipaddress {
            192.168.92.200
        }
    }
    # LVS settings: define a virtual server section
    virtual_server 192.168.92.200 80 {
        # service polling delay, i.e. the interval between real-server health checks
        # set to 0 for testing; the default is 6
        delay_loop 0
        # LVS scheduling algorithm: rr|wrr|lc|wlc|lblc|sh|dh
        lb_algo rr
        # LVS forwarding mode: NAT|DR|TUN
        lb_kind DR
        # session persistence time in seconds, i.e. a client stays on the same real server within this window
        # set to 0 for testing
        persistence_timeout 0
        # protocol used for health checks: TCP or UDP
        protocol TCP
        # real server sections
        # weight and health-check settings for the backend nodes
        real_server 192.168.92.14 80 {
            #weight 1   # per-server weight; ignored by rr
            # HTTP health check
            HTTP_GET {
                url {
                    path /
                }
                # connection timeout
                connect_timeout 3
                # retry count
                retry 3
                # delay between retries
                delay_before_retry 3
            }
        }
        real_server 192.168.92.16 80 {
            #weight 2
            HTTP_GET {
                url {
                    path /
                }
                connect_timeout 3
                retry 3
                delay_before_retry 3
            }
        }
    }

    Backup node configuration
    Start from the master's configuration and change only the vrrp_instance VI_1 section: state, priority, and the interface name (e.g. enp0s3) to match that node's NIC; everything else stays the same. A sketch of the changed block follows.
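
    # A sketch of the backup node's changed section only; priority 90 and the interface name are
    # assumptions for this example, the rest of keepalived.conf is identical to the master's:
    vrrp_instance VI_1 {
        state BACKUP
        interface ens37          # use the NIC name on the backup host (e.g. enp0s3)
        virtual_router_id 51     # must match the master
        priority 90              # lower than the master's 100
        advert_int 1
        virtual_ipaddress {
            192.168.92.200
        }
    }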

    service keepalived start
    service keepalived stop
    -- After the keepalived service is restored, the VIP fails back
    service keepalived start

    tail -f /var/log/messages
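
    -- To see which node currently holds the VIP (interface name as configured above):
    ip addr show ens37 | grep 192.168.92.200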

    -------------------------------------------------------------------------------
    -- Sharding (splitting databases and tables)
    InnoDB: maximum table size 64 TB
    MyISAM: maximum table size 256 TB
    Notes: a rule referenced in schema.xml but not defined in rule.xml makes Mycat fail to start; the schema names in schema.xml must match the schemas granted in server.xml, otherwise Mycat fails to start; configuring several <writeHost> entries under one <dataHost> caused read/write problems here.
    -- With Mycat 1.6.6.1 and the JDK installed, edit the configuration files; switchType controls failover

    Writes routed through dn1 showed no read/write splitting effect from the readHost: after sharding, the readHost needs a database created with the same name as on the writeHost, otherwise reads are re-routed to the writeHost.

    -- schema.xml contents

    <?xml version="1.0"?>
    <!DOCTYPE mycat:schema SYSTEM "schema.dtd">
    <mycat:schema xmlns:mycat="http://io.mycat/">
        <schema name="mydb1" checkSQLschema="true" sqlMaxLimit="100">
            <table name="t_order" primaryKey="uuid" autoIncrement="true" dataNode="dn1"/>
            <table name="t_user" primaryKey="uuid" autoIncrement="true" dataNode="dn2,dn3" rule="mod-long" />
        </schema>
        
        <dataNode name="dn1" dataHost="dhost1" database="orderdb" />
        <dataNode name="dn2" dataHost="dhost1" database="userdb" />
        <dataNode name="dn3" dataHost="dhost2" database="userdb" />
        
        <dataHost name="dhost1" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql" dbDriver="native">
            <heartbeat>select user()</heartbeat>
            <writeHost host="myhostM1" url="192.168.92.14:3306" user="root" password="oracle"></writeHost>
        </dataHost>
        <dataHost name="dhost2" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql" dbDriver="native">
            <heartbeat>select user()</heartbeat>
            <writeHost host="myhostM2" url="192.168.92.16:3306" user="root"    password="oracle"></writeHost>
        </dataHost>
    </mycat:schema>

    ------------------------------------------------------------------------
    -- server.xml contents

    <?xml version="1.0" encoding="UTF-8"?>
    <!-- - - Licensed under the Apache License, Version 2.0 (the "License"); 
        - you may not use this file except in compliance with the License. - You 
        may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 
        - - Unless required by applicable law or agreed to in writing, software - 
        distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT 
        WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the 
        License for the specific language governing permissions and - limitations 
        under the License. -->
    <!DOCTYPE mycat:server SYSTEM "server.dtd">
    <mycat:server xmlns:mycat="http://io.mycat/">
        <user name="root">
            <property name="password">oracle</property>
            <property name="schemas">mydb1</property>
            <property name="readOnly">false</property>
        </user>
    </mycat:server>

    ------------------------------------------------------------------------
    -- rule.xml contents

    <?xml version="1.0" encoding="UTF-8"?>
    <!-- - - Licensed under the Apache License, Version 2.0 (the "License"); 
        - you may not use this file except in compliance with the License. - You 
        may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 
        - - Unless required by applicable law or agreed to in writing, software - 
        distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT 
        WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the 
        License for the specific language governing permissions and - limitations 
        under the License. -->
    <!DOCTYPE mycat:rule SYSTEM "rule.dtd">
    <mycat:rule xmlns:mycat="http://io.mycat/">
        <tableRule name="rule1">
            <rule>
                <columns>id</columns>
                <algorithm>func1</algorithm>
            </rule>
        </tableRule>
    
        <tableRule name="rule2">
            <rule>
                <columns>user_id</columns>
                <algorithm>func1</algorithm>
            </rule>
        </tableRule>
    
        <tableRule name="sharding-by-intfile">
            <rule>
                <columns>sharding_id</columns>
                <algorithm>hash-int</algorithm>
            </rule>
        </tableRule>
        <tableRule name="auto-sharding-long">
            <rule>
                <columns>id</columns>
                <algorithm>rang-long</algorithm>
            </rule>
        </tableRule>
        <tableRule name="mod-long">
            <rule>
                <columns>uuid</columns>
                <algorithm>mod-long</algorithm>
            </rule>
        </tableRule>
        <tableRule name="sharding-by-murmur">
            <rule>
                <columns>id</columns>
                <algorithm>murmur</algorithm>
            </rule>
        </tableRule>
        <tableRule name="crc32slot">
            <rule>
                <columns>id</columns>
                <algorithm>crc32slot</algorithm>
            </rule>
        </tableRule>
        <tableRule name="sharding-by-month">
            <rule>
                <columns>create_time</columns>
                <algorithm>partbymonth</algorithm>
            </rule>
        </tableRule>
        <tableRule name="latest-month-calldate">
            <rule>
                <columns>calldate</columns>
                <algorithm>latestMonth</algorithm>
            </rule>
        </tableRule>
        
        <tableRule name="auto-sharding-rang-mod">
            <rule>
                <columns>id</columns>
                <algorithm>rang-mod</algorithm>
            </rule>
        </tableRule>
        
        <tableRule name="jch">
            <rule>
                <columns>id</columns>
                <algorithm>jump-consistent-hash</algorithm>
            </rule>
        </tableRule>
    
        <function name="murmur"
            class="io.mycat.route.function.PartitionByMurmurHash">
            <property name="seed">0</property>
            <property name="count">2</property>
            <property name="virtualBucketTimes">160</property>
        </function>
    
        <function name="crc32slot"
                  class="io.mycat.route.function.PartitionByCRC32PreSlot">
        </function>
        <function name="hash-int"
            class="io.mycat.route.function.PartitionByFileMap">
            <property name="mapFile">partition-hash-int.txt</property>
        </function>
        <function name="rang-long"
            class="io.mycat.route.function.AutoPartitionByLong">
            <property name="mapFile">autopartition-long.txt</property>
        </function>
        <function name="mod-long" class="io.mycat.route.function.PartitionByMod">
            <!-- how many data nodes -->
            <property name="count">2</property>
        </function>
    
        <function name="func1" class="io.mycat.route.function.PartitionByLong">
            <property name="partitionCount">8</property>
            <property name="partitionLength">128</property>
        </function>
        <function name="latestMonth"
            class="io.mycat.route.function.LatestMonthPartion">
            <property name="splitOneDay">24</property>
        </function>
        <function name="partbymonth"
            class="io.mycat.route.function.PartitionByMonth">
            <property name="dateFormat">yyyy-MM-dd</property>
            <property name="sBeginDate">2015-01-01</property>
        </function>
        
        <function name="rang-mod" class="io.mycat.route.function.PartitionByRangeMod">
                <property name="mapFile">partition-range-mod.txt</property>
        </function>
        
        <function name="jump-consistent-hash" class="io.mycat.route.function.PartitionByJumpConsistentHash">
            <property name="totalBuckets">3</property>
        </function>
    </mycat:rule>

    ----------------------------------------------------------------

    -- Disable SELinux
    vi /etc/selinux/config
    SELINUX=disabled

    vi /etc/profile

    export JAVA_HOME=/usr/java/latest
    #export CLASSPATH=.:$JAVA_HOME/lib
    #export PATH=$JAVA_HOME/bin:$PATH
    export CLASSPATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$CLASSPATH
    export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
    export MYCAT_HOME=/opt/mycat
    export PATH=$PATH:$MYCAT_HOME/bin

    -- Apply the changes
    source /etc/profile

    vi wrapper.conf   # add the following

    # startup timeout: 300 seconds
    wrapper.startup.timeout=300
    #wrapper.ping.timeout=120


    -- Run in MySQL on both nodes
    mysql -uroot -poracle
    grant all privileges on *.* to root@'%' identified by 'oracle';

    create database orderdb;
    create database userdb;

    create table orderdb.t_order(uuid int,name varchar(200),primary key(uuid));
    insert into orderdb.t_order select 1,'order1';
    insert into orderdb.t_order select 2,'order2';

    create table orderdb.company(uuid int,name varchar(200),primary key(uuid));
    insert into orderdb.company select 1,'order1';
    insert into orderdb.company select 2,'order2';

    use userdb;
    create table userdb.t_user(uuid int primary key,name varchar(200));

    -- Test sharding through Mycat
    -- Error (disappears after a restart): ERROR 1105 (HY000): backend connect: java.lang.IllegalArgumentException: Invalid DataSource:0
    -- Mycat account information is configured in server.xml
    mysql -uroot -h192.168.92.14 -P8066 -poracle
    use mydb1;
    --delete from userdb.t_user;
    select * from userdb.t_user;

    insert into userdb.t_user(uuid,name) values (1,'n1');
    insert into userdb.t_user(uuid,name) values (2,'n2');
    insert into userdb.t_user(uuid,name) values (3,'n3');
    insert into userdb.t_user(uuid,name) values (4,'n4');
    insert into userdb.t_user(uuid,name) values (5,'n5');
    insert into userdb.t_user(uuid,name) values (6,'n6');
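
    -- A hedged way to see the mod-long split: with count=2 on uuid, rows with an even uuid should
    -- land on the first dataNode (dn2, 192.168.92.14) and odd uuids on dn3 (192.168.92.16).
    -- Check each backend directly:
    mysql -uroot -poracle -h192.168.92.14 -e "select * from userdb.t_user;"
    mysql -uroot -poracle -h192.168.92.16 -e "select * from userdb.t_user;"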

    -- Solving the auto-increment id problem

    Once tables are sharded, the database's own auto-increment primary key can no longer guarantee uniqueness. For auto-increment keys, Mycat therefore provides a global sequence.

    First, add the following to server.xml:

    <property name="sequnceHandlerType">1</property>

    For sequnceHandlerType, 0 means a local file, 1 a database table, and 2 a timestamp. The database-table approach is used below to implement the auto-increment key.

    Create the TEST_SEQUENCE table in db02; it stores the sequence state, where increment is the step size and name is the logical table name.

    CREATE TABLE TEST_SEQUENCE (
        name VARCHAR(50) NOT NULL,
        current_value INT NOT NULL,
        increment INT NOT NULL DEFAULT 1,
        PRIMARY KEY(name)
    ) ENGINE=InnoDB;

    Insert the seed row

    INSERT INTO `TEST_SEQUENCE` (`name`, `current_value`, `increment`) VALUES  ('item', 2, 1);

    Create the corresponding sequence functions

    DROP FUNCTION IF EXISTS mycat_seq_nextval;
    DELIMITER ;;
    CREATE FUNCTION mycat_seq_nextval(seq_name VARCHAR(50)) RETURNS varchar(64) CHARSET utf8 DETERMINISTIC
    BEGIN
        UPDATE TEST_SEQUENCE
        SET current_value = current_value + increment
        WHERE name = seq_name;
        RETURN mycat_seq_currval(seq_name);
    END ;;
    DELIMITER ;

    DELIMITER ;;
    CREATE FUNCTION mycat_seq_setval(seq_name VARCHAR(50), value INTEGER) RETURNS varchar(64) CHARSET utf8 DETERMINISTIC
    BEGIN
        UPDATE TEST_SEQUENCE
        SET current_value = value
        WHERE name = seq_name;
        RETURN mycat_seq_currval(seq_name);
    END ;;
    DELIMITER ;

    DELIMITER ;;
    CREATE FUNCTION mycat_seq_currval(seq_name VARCHAR(50)) RETURNS varchar(64) CHARSET utf8 DETERMINISTIC
    BEGIN
        DECLARE retval VARCHAR(64);
        SET retval = "-999999999,null";
        SELECT concat(CAST(current_value AS CHAR), ",", CAST(increment AS CHAR)) INTO retval
        FROM TEST_SEQUENCE WHERE name = seq_name;
        RETURN retval;
    END ;;
    DELIMITER ;

    Configure sequence_db_conf.properties as follows

    ITEM=node_db02

    Here ITEM is the logical table name and node_db02 is the dataNode name.

    You can query the sequence to check whether the Mycat global sequence is set up correctly; once it is, inserts get auto-incremented primary keys (see the sketch below).
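
    -- A hedged usage sketch: with ITEM=node_db02 configured, the conventional Mycat sequence name
    -- for the item table is MYCATSEQ_ITEM; fetch a value directly or rely on autoIncrement
    -- (the column names in the insert are illustrative, not taken from this document):
    select next value for MYCATSEQ_ITEM;
    insert into item(name) values ('item1');   -- id is filled in by the global sequence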

    schema.xml configuration

        <!-- Table-to-node mapping: schema name="TESTDB" must match the TESTDB entry in server.xml -->

        <schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100">  

            <table name="users" primaryKey="id"  dataNode="node_db01" />  

            <table name="item" autoIncrement="true" primaryKey="id" dataNode="node_db02,node_db03" rule="role1" />  

    <table name="test_sequence" primaryKey="name" dataNode="node_db02"></table>

        </schema>  
