  • erlang VM crash

          In production, every time we updated one service, all the nodes in another cluster restarted right along with it. Real cute. What is this about...

      Check the system log: /var/log/messages

      Oct 30 15:19:41 localhost kernel: beam.smp[21880]: segfault at 7fa300006d4b ip 00007fa300006d4b sp 00007fa3d0d7c788 error 14 in locale-archive[7fa31616f000+5e91000

      So beam crashed. Fine, time to recall which C libraries we use; none of them should have a problem like this.

      OK, turn on core dumps and reproduce it once more. Yes, reproduce it in production: designing restarts that don't affect the business at all really pays off here.

      Before long the dump popped out. Attach gdb and the faulting stack shows up quickly:

    #0 0x00007fa300006d4b in ?? ()
    #1 0x00007fa3aa83dd96 in quicksort () from /data0/xxx_0.4.4/lib/hash_ring-0.1.6/priv/hash_ring_drv.so
    #2 0x00007fa3aa83d026 in hash_ring_remove_node () from /data0/xxx_0.4.4/lib/hash_ring-0.1.6/priv/hash_ring_drv.so
    #3 0x00007fa3aa83c295 in hash_ring_drv_output () from /data0/xxx_0.4.4/lib/hash_ring-0.1.6/priv/hash_ring_drv.so
    #4 0x00000000004924ef in call_driver_output (c_p=0x7fa3b0ec8f50, flags=2064, prt=0x7fa3d8d40bc0, from=55559696966931,
    list=140341277983642, refp=0x0) at beam/io.c:1768

    OK, the problem is pinpointed to the C dependency hash_ring.

    As an aside, here is how the consistent hashing for inter-service calls is implemented:

    1. A gen_server periodically rpc:calls the target nodes to check whether the given service is running, and dynamically manages the nodes in the hash_ring accordingly (a sketch of the periodic check loop follows the code below).

    init([Configs]) ->
        %hash_ring must already be started; link so the two restart together
        link(whereis(hash_ring)),
        ets:new(?MODULE, [named_table, protected, set, {read_concurrency, true}]),
        ets:new(?ROUND_ROBIN_ETS, [named_table, public, set, {write_concurrency, true}]),
        ets:insert(?ROUND_ROBIN_ETS, {inc, 0}),
        {ok, Routes} = parse_configs(Configs),
        State = apply_routes(Routes, #state{}),
        start_check_timer(),
        {ok, State}.

      

    monitor_route({Svc, Node} = Route) ->
        % monitor can't immediately tell whether the target exists, so monitor only after rpc:call succeeds
        case catch rpc:call(Node, erlang, whereis, [Svc], 3000) of
            {'EXIT', Reason} ->
                {error, Reason};
            undefined ->
                {error, svc_undefined};
            Pid when is_pid(Pid) ->
                Ref = erlang:monitor(process, Route),
                {ok, Ref};
            Error ->
                {error, Error}
        end.
    
    route_up(Name, Route, Ref, #state{mons=Mons, downs=Downs} = State) ->
        case lists:member(Route, get_all_routes(Name)) of
           true ->
               case lists:member(Route, get_routes(Name)) of
                    false ->
                        ok = hash_ring:add_node({route, Name}, term_to_binary(Route)),
                        add_route(Name, Route);
                    true ->
                        ok
                end,
                State#state{mons=dict:store(Ref, {Name, Route}, Mons),
                            downs=Downs -- [{Name, Route}]};
            _ ->
                catch erlang:demonitor(Ref),
                lager:info("igonre route_up:~p ~p ~p", [Name, Route, Ref]),
                State
        end.
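
      The periodic probe in step 1 hangs off start_check_timer(), which the post does not show. Here is a minimal sketch of what that loop might look like, assuming a hypothetical ?CHECK_INTERVAL macro and reusing monitor_route/1 and route_up/4 from above:

    start_check_timer() ->
        erlang:send_after(?CHECK_INTERVAL, self(), check_routes).

    handle_info(check_routes, #state{downs=Downs} = State) ->
        %re-probe every route currently marked down; once its service
        %process is reachable again, put it back on the ring
        NewState = lists:foldl(
            fun({Name, Route}, Acc) ->
                case monitor_route(Route) of
                    {ok, Ref} -> route_up(Name, Route, Ref, Acc);
                    {error, _Reason} -> Acc
                end
            end, State, Downs),
        start_check_timer(),
        {noreply, NewState}.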

      2. Our own gen_call replaces rpc:call, saving the resource cost of monitors.

    route(Name, Key) ->
        case hash_ring:find_node({route, Name}, neo_util:to_binary(erlang:phash2(Key))) of
            {ok, Route} -> {ok, binary_to_term(Route)};
            Error -> Error
        end.
    
    call(Name, Key, Req, Timeout) ->
        {ok, Dst} = route(Name, Key),
        gen_call(Dst, Req, Timeout).
    
    %following WhatsApp's approach
    %use cases:
    %1. the process would otherwise need a long-lived monitor
    %2. the process is a resident service process on its node, not a temporary one
    %3. the caller confirms the process is alive before calling
    %advantages:
    %no monitor needed, which saves two network round trips
    %(gen:call does monitor -> call -> demonitor; the monitor on the destination node costs resources)
    %impact:
    %the instant a process goes down, in-flight calls all time out,
    %and this call reports timeout rather than noproc
    %
    gen_call(Process, Request) ->
        gen_call(Process, Request, 5000).
    
    gen_call(Process, Request, Timeout) ->
        Ref = erlang:make_ref(),
        catch erlang:send(Process, {'$gen_call', {self(), Ref}, Request}),
        receive
            {Ref, Reply} ->
                Reply
        after Timeout ->
                exit({timeout, {?MODULE, gen_call, [Process, Request, Timeout]}})
        end.
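
      Because gen_call sends the same {'$gen_call', {self(), Ref}, Request} tuple that gen:call does, it interoperates with any ordinary gen_server, which replies through gen_server:reply/2 as usual. A hypothetical call site built on call/4 above (the session_svc route name and the request term are invented for illustration):

    %route on the key, then call the resident service on whichever node
    %the ring picked; turn the timeout exit back into a return value
    lookup_session(Uid) ->
        try call(session_svc, Uid, {lookup, Uid}, 5000)
        catch exit:{timeout, _} -> {error, timeout}
        end.

      One consequence of skipping the monitor: if the caller catches the timeout exit like this, a late {Ref, Reply} can still land in its mailbox afterwards, so long-lived callers should be prepared to flush stray replies.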

      The root cause: https://github.com/chrismoos/hash-ring/blob/master/sort.c#52

      The quicksort there uses a fixed explicit stack with STACK_SIZE = 1024, and removing a node re-sorts all the remaining items. With enough nodes the stack array overflows, hence the segfault.
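
      To make the failure concrete, a hypothetical repro sketch (not from the original post; it assumes a hash_ring:remove_node/2 mirroring the add_node/2 call used in route_up/4, and that the {route, repro} ring already exists):

    %with enough virtual items on the ring, a single removal re-sorts the
    %remaining items inside the driver and overflows its fixed quicksort
    %stack, taking beam.smp down; 10000 is an arbitrary large count
    crash_repro() ->
        Routes = [term_to_binary({svc, N}) || N <- lists:seq(1, 10000)],
        [ok = hash_ring:add_node({route, repro}, R) || R <- Routes],
        hash_ring:remove_node({route, repro}, hd(Routes)).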

      Solution:

      The quicksort in sort.h is indeed buggy, but there is no rush to optimize the sorting algorithm itself.

      In fact, adding and removing nodes on a consistent hash ring does not require a full re-sort at all (removal is a linear compaction, addition a single merge pass), so I submitted a patch fixing the problem.

      The approach:

    1. Removal: each virtual item keeps a reference to its physical node, so deleting a node only needs to compact the emptied slots (see the short illustration after the patch below).

    2. Addition: quicksort only the newly added node's items instead of re-sorting everything, then fold them in with one two-way merge:

        quicksort((void**)adds, ring->numReplicas, item_sort);
    +    // total items after this add: ring->numNodes * ring->numReplicas
    +    if(ring->items == NULL || ring->numNodes == 1) {
    +        ring->items = adds;
    +    } else {
    +        size_t size_new = sizeof(hash_ring_item_t*) * ring->numNodes * ring->numReplicas;
    +        hash_ring_item_t **news = (hash_ring_item_t **)malloc(size_new);
    +        hash_ring_item_t **olds = ring->items;
    +        if(news == NULL) {
    +            return HASH_RING_ERR;
    +        }
    +        // two-way merge: olds is already sorted, adds was just quicksorted
    +        int oldlen = (ring->numNodes - 1) * ring->numReplicas;
    +        int addlen = ring->numReplicas;
    +        int i = 0, j = 0, k = 0;
    +        while(i < oldlen || j < addlen) {
    +            if(j == addlen) {           // adds exhausted, drain olds
    +                news[k++] = olds[i++];
    +            } else if(i == oldlen) {    // olds exhausted, drain adds
    +                news[k++] = adds[j++];
    +            } else {
    +                int ret = item_sort(olds[i], adds[j]);
    +                if(ret == 0) {          // equal keys: keep both, old first
    +                    news[k++] = olds[i++];
    +                    news[k++] = adds[j++];
    +                } else if(ret < 0) {
    +                    news[k++] = olds[i++];
    +                } else {
    +                    news[k++] = adds[j++];
    +                }
    +            }
    +        }
    +        free(adds);
    +        free(olds);
    +        ring->items = news;
    +    }
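
      Step 1 (removal) is not shown in the patch excerpt above. Purely to illustrate the idea, in Erlang terms with a hypothetical {Hash, Node} item representation (not the library's actual C structures): since the items are already sorted, dropping one physical node's entries is a single linear filter pass with no re-sort:

    %each virtual item carries a reference to its physical node, so
    %removing a node is one pass that compacts out that node's slots
    remove_node_items(Items, Node) ->
        [Item || {_Hash, ItemNode} = Item <- Items, ItemNode =/= Node].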

      One more note:

      This hashing scheme still isn't entirely reliable. What if two virtual items end up with the same hash value?

      Generally, as long as every node applies additions and removals in the same order, the rings stay consistent; ideally, though, items with equal hash values should be ordered by node name (a sketch follows below).
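
      A sketch of that tie-break, again in Erlang purely for illustration (hypothetical {Hash, NodeName} items, not the library's C comparator): ordering by hash first and node name second makes the ring deterministic regardless of add/remove order:

    %order items by hash value, breaking ties by node name so every
    %member builds an identical ring regardless of insertion order
    item_compare({Hash, NodeA}, {Hash, NodeB}) -> NodeA =< NodeB;
    item_compare({HashA, _}, {HashB, _}) -> HashA =< HashB.

    sort_items(Items) ->
        lists:sort(fun item_compare/2, Items).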
