zoukankan      html  css  js  c++  java
  • slab分配器

        为什么要用slab分配器?程序运行时大多数分配请求并不是大块内存,比如task_struct这样的小结构,如果用伙伴系统来管理这部分内存的分配就太慢了。而且每次调用伙伴系统都会对系统的数据和指令高速缓存产生相当的影响(slab会减少对伙伴系统的调用次数)。此外,如果数据直接存储在伙伴系统提供的页中,其地址总是出现在2的幂次的整数倍附近,这对高速缓存的利用有负面影响:这种分布使部分缓存行使用过度,而其他缓存行几乎为空。

    每个缓存都定义为一个kmem_cache结构(该结构在内核中的其他地方不可见因为是放在.c中的 =.=):

    /*
     * One slab cache. Instances are invisible outside mm/slab.c (the
     * definition lives in the .c file, not in a header).
     */
    struct kmem_cache {
        /* one array_cache per possible CPU */
        struct array_cache *array[NR_CPUS];

        unsigned int batchcount;        /* objects moved per refill/drain */
        unsigned int limit;             /* max objects kept per CPU */
        unsigned int shared;

        unsigned int buffer_size;       /* size of each managed object */
        u32 reciprocal_buffer_size;     /* for fast division by buffer_size */

        unsigned int flags;             /* constant behaviour flags */
        unsigned int num;               /* objects per slab */

        unsigned int gfporder;          /* page order of one slab */

        gfp_t gfpflags;                 /* forced GFP flags, e.g. GFP_DMA */

        size_t colour;                  /* cache-colouring range */
        unsigned int colour_off;        /* colour offset unit */
        struct kmem_cache *slabp_cache; /* cache for off-slab slab heads */
        unsigned int slab_size;
        unsigned int dflags;            /* dynamic flags */

        void (*ctor)(struct kmem_cache *, void *);  /* object constructor */

        const char *name;               /* shown in /proc/slabinfo */
        struct list_head next;          /* link in the global cache_chain */

    #if STATS
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;
    #endif
    #if DEBUG
        int obj_offset;
        int obj_size;
    #endif
        /* per-NUMA-node slab lists (full / partial / free) */
        struct kmem_list3 *nodelists[MAX_NUMNODES];
    };

    内核为每个cpu提供一个array_cache实例:

    /* Per-CPU object stash; one instance exists for every CPU. */
    struct array_cache {
        unsigned int avail;       /* objects currently available */
        unsigned int limit;
        unsigned int batchcount;
        unsigned int touched;     /* 1 after an object is taken, 0 after a shrink */
        spinlock_t lock;
        void *entry[];            /* flexible array of cached object pointers */
    };

    用于管理slab链表的表头保存在一个独立的数据结构中:

    /* Per-node list heads managing a cache's slabs. */
    struct kmem_list3 {
        /* partial first — allegedly generates better assembly for the scan */
        struct list_head slabs_partial;
        struct list_head slabs_full;
        struct list_head slabs_free;

        unsigned long free_objects;   /* number of free objects on this node */
        unsigned int free_limit;      /* upper bound on kept free objects */
        unsigned int colour_next;     /* next slab colour for this node */
        spinlock_t list_lock;
        struct array_cache *shared;   /* shared within the node */
        struct array_cache **alien;   /* objects belonging to other nodes */
        unsigned long next_reap;      /* earliest time of the next shrink attempt,
                                       * throttles shrink/grow churn */
        int free_touched;
    };

    下面开始看一下slab分配器的启动过程,初始化函数为:

    void __init kmem_cache_init(void)

    然后循环初始化NUM_INIT_LISTS个kmem_list3 结构:

             for (i = 0; i < NUM_INIT_LISTS; i++) {
    kmem_list3_init(
    &initkmem_list3[i]);
    if (i < MAX_NUMNODES)
    cache_cache.nodelists[i]
    = NULL;
    }

    kmem_list3 结构初始化的过程如下(这些都很清楚):

    /* Reset a kmem_list3 to the empty state: three empty lists, no objects. */
    static void kmem_list3_init(struct kmem_list3 *parent)
    {
        INIT_LIST_HEAD(&parent->slabs_full);
        INIT_LIST_HEAD(&parent->slabs_partial);
        INIT_LIST_HEAD(&parent->slabs_free);
        parent->shared = NULL;
        parent->alien = NULL;
        parent->colour_next = 0;
        spin_lock_init(&parent->list_lock);
        parent->free_objects = 0;
        parent->free_touched = 0;
    }

    关于cache_cache定义如下:

    /*
     * The bootstrap cache that kmem_cache descriptors themselves come
     * from; it must be statically initialised because no allocator
     * exists yet when it is first needed.
     */
    static struct kmem_cache cache_cache = {
        .batchcount  = 1,
        .limit       = BOOT_CPUCACHE_ENTRIES,
        .shared      = 1,
        .buffer_size = sizeof(struct kmem_cache),
        .name        = "kmem_cache",
    };

    这个就初始化好了一个kmem_list3 数组:initkmem_list3(这个内存应该是在编译内核的时候已经分配了,所以这块就不涉及小内存分配的问题),然后把initkmem_list3的值赋给cache_cache:

    /* Invoked as set_up_list3s(&cache_cache, CACHE_CACHE); CACHE_CACHE is 0. */
    static void __init set_up_list3s(struct kmem_cache *cachep, int index)
    {
        int node;

        for_each_online_node(node) {
            cachep->nodelists[node] = &initkmem_list3[index + node];
            /* stagger reap times per cache so shrinks don't all coincide */
            cachep->nodelists[node]->next_reap = jiffies +
                REAPTIMEOUT_LIST3 +
                ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
        }
    }

    判断当前系统中物理内存的总量,如果大于32MB就把slab_break_gfp_order设置为BREAK_GFP_ORDER_HI,即内存充足时允许slab使用更高阶(更大)的页块,以减少slab内部的碎片:

            if (num_physpages > (32 << 20) >> PAGE_SHIFT)
    slab_break_gfp_order
    = BREAK_GFP_ORDER_HI;

    然后把cache_cache加入到cache_chain链表中:

            node = numa_node_id();

    INIT_LIST_HEAD(
    &cache_chain);
    list_add(
    &cache_cache.next, &cache_chain);
    cache_cache.colour_off
    = cache_line_size();
    cache_cache.array[smp_processor_id()]
    = &initarray_cache.cache;
    cache_cache.nodelists[node]
    = &initkmem_list3[CACHE_CACHE + node];
    cache_cache.buffer_size
    = offsetof(struct kmem_cache, nodelists) + nr_node_ids * sizeof(struct kmem_list3 *);
    #if DEBUG
    cache_cache.obj_size
    = cache_cache.buffer_size;
    #endif
    cache_cache.buffer_size
    = ALIGN(cache_cache.buffer_size, cache_line_size());
    cache_cache.reciprocal_buffer_size
    = reciprocal_value(cache_cache.buffer_size);

    cache_estimate在计算object数目的时候要分两种情况:一种是slab管理头在slab之内,另一种是管理头在slab之外。循环遍历order,在找到第一个使object数目不为0的order时就跳出循环,这时也初始化了cache_cache中的相关属性:

            for (order = 0; order < MAX_ORDER; order++) {
    cache_estimate(order, cache_cache.buffer_size, cache_line_size(),
    0, &left_over, &cache_cache.num);
    if (cache_cache.num)
    break;
    }
    BUG_ON(
    !cache_cache.num);
    /*
     * Compute how many objects of @buffer_size fit into a slab of
     * 2^@gfporder pages (*num) and how many bytes remain (*left_over).
     * Two cases: the slab head is stored off-slab, or on-slab together
     * with the objects.
     */
    static void cache_estimate(unsigned long gfporder, size_t buffer_size,
                               size_t align, int flags, size_t *left_over,
                               unsigned int *num)
    {
        int nr_objs;
        size_t mgmt_size;
        size_t slab_size = PAGE_SIZE << gfporder;

        if (flags & CFLGS_OFF_SLAB) {
            /* management data lives elsewhere: all space holds objects */
            mgmt_size = 0;
            nr_objs = slab_size / buffer_size;

            if (nr_objs > SLAB_LIMIT)
                nr_objs = SLAB_LIMIT;
        } else {
            /*
             * On-slab: struct slab plus one kmem_bufctl_t per object
             * share the pages with the objects. Estimate first ignoring
             * the alignment of the management area, then correct down
             * by one if the aligned total overflows the slab.
             */
            nr_objs = (slab_size - sizeof(struct slab)) /
                      (buffer_size + sizeof(kmem_bufctl_t));
            if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size >
                slab_size)
                nr_objs--;

            if (nr_objs > SLAB_LIMIT)
                nr_objs = SLAB_LIMIT;

            mgmt_size = slab_mgmt_size(nr_objs, align);
        }
        *num = nr_objs;
        *left_over = slab_size - nr_objs * buffer_size - mgmt_size;
    }

    设置cache_cache中的其他的一些属性:

            cache_cache.gfporder = order;
    cache_cache.colour
    = left_over / cache_cache.colour_off;
    cache_cache.slab_size
    = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + sizeof(struct slab), cache_line_size());

    初始化cache,为array_cache和kmem_list3分配空间,不然后面的都玩不起来了:

            sizes = malloc_sizes;
    names
    = cache_names;

    sizes[INDEX_AC].cs_cachep
    = kmem_cache_create(names[INDEX_AC].name, sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL);

    /*
     * kmem_cache_create - create a new slab cache.
     * @name:  name shown in /proc/slabinfo
     * @size:  size in bytes of one object in this cache
     * @align: required object alignment
     * @flags: SLAB_* behaviour flags
     * @ctor:  optional object constructor
     *
     * Returns the new kmem_cache, or NULL on failure (panics instead
     * when SLAB_PANIC is set in @flags).
     */
    struct kmem_cache *
    kmem_cache_create(const char *name, size_t size, size_t align,
                      unsigned long flags,
                      void (*ctor)(struct kmem_cache *, void *))
    {
        size_t left_over, slab_size, ralign;
        struct kmem_cache *cachep = NULL, *pc;

        /* sanity: need a name, no interrupt context, size within bounds */
        if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
            size > KMALLOC_MAX_SIZE) {
            printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
                   name);
            BUG();
        }

        /* cache_chain_mutex also protects cpu_online_map here */
        get_online_cpus();
        mutex_lock(&cache_chain_mutex);

        list_for_each_entry(pc, &cache_chain, next) {
            char tmp;
            int res;

            /*
             * probe_kernel_address(addr, retval) safely reads one byte
             * of the name. It fails when a module was unloaded without
             * destroying its caches, leaving a dangling name pointer.
             */
            res = probe_kernel_address(pc->name, tmp);
            if (res) {
                printk(KERN_ERR
                       "SLAB: cache with size %d has lost its name\n",
                       pc->buffer_size);
                continue;
            }

            /* a cache with this name must not already exist */
            if (!strcmp(pc->name, name)) {
                printk(KERN_ERR
                       "kmem_cache_create: duplicate cache %s\n", name);
                dump_stack();
                goto oops;
            }
        }

        BUG_ON(flags & ~CREATE_MASK);

        /* round the object size up to a word boundary */
        if (size & (BYTES_PER_WORD - 1)) {
            size += (BYTES_PER_WORD - 1);
            size &= ~(BYTES_PER_WORD - 1);
        }

        if (flags & SLAB_HWCACHE_ALIGN) {
            /*
             * Default to cache-line alignment, but let several small
             * objects share one line by halving the alignment while
             * the object still fits.
             */
            ralign = cache_line_size();
            while (size <= ralign / 2)
                ralign /= 2;
        } else {
            ralign = BYTES_PER_WORD;
        }

        if (flags & SLAB_STORE_USER)
            ralign = BYTES_PER_WORD;

        if (flags & SLAB_RED_ZONE) {
            ralign = REDZONE_ALIGN;
            size += REDZONE_ALIGN - 1;
            size &= ~(REDZONE_ALIGN - 1);
        }

        /* minimum required by the architecture */
        if (ralign < ARCH_SLAB_MINALIGN) {
            ralign = ARCH_SLAB_MINALIGN;
        }
        /* minimum forced by the caller */
        if (ralign < align) {
            ralign = align;
        }

        /* debug aids don't survive alignment beyond unsigned long long */
        if (ralign > __alignof__(unsigned long long))
            flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);

        align = ralign;

        /* allocate the new descriptor from the bootstrap cache */
        cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
        if (!cachep)
            goto oops;

        /* decide whether the slab head is stored off-slab */
        if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
            flags |= CFLGS_OFF_SLAB;

        /* grow the object size up to the chosen alignment */
        size = ALIGN(size, align);

        /* iterate to find a suitable slab order */
        left_over = calculate_slab_order(cachep, size, align, flags);
        if (!cachep->num) {
            printk(KERN_ERR
                   "kmem_cache_create: couldn't create cache %s.\n", name);
            kmem_cache_free(&cache_cache, cachep);
            cachep = NULL;
            goto oops;
        }

        /* management area: slab head plus one bufctl per object */
        slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) +
                          sizeof(struct slab), align);

        /* if the leftover space can hold the slab head, keep it on-slab */
        if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
            flags &= ~CFLGS_OFF_SLAB;
            left_over -= slab_size;
        }
        if (flags & CFLGS_OFF_SLAB) {
            /* off-slab management needs no extra alignment */
            slab_size = cachep->num * sizeof(kmem_bufctl_t) +
                        sizeof(struct slab);
        }

        /* colour offset: one L1 line, but at least the object alignment */
        cachep->colour_off = cache_line_size();
        if (cachep->colour_off < align)
            cachep->colour_off = align;
        cachep->colour = left_over / cachep->colour_off;
        cachep->slab_size = slab_size;
        cachep->flags = flags;
        cachep->gfpflags = 0;
        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
            cachep->gfpflags |= GFP_DMA;
        cachep->buffer_size = size;
        cachep->reciprocal_buffer_size = reciprocal_value(size);

        if (flags & CFLGS_OFF_SLAB) {
            cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
            BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
        }
        cachep->ctor = ctor;
        cachep->name = name;

        /* build the per-CPU array caches */
        if (setup_cpu_cache(cachep)) {
            __kmem_cache_destroy(cachep);
            cachep = NULL;
            goto oops;
        }

        /* publish the new cache on the global chain */
        list_add(&cachep->next, &cache_chain);
    oops:
        if (!cachep && (flags & SLAB_PANIC))
            panic("kmem_cache_create(): failed to create slab `%s'\n",
                  name);
        mutex_unlock(&cache_chain_mutex);
        put_online_cpus();
        return cachep;
    }
            if (INDEX_AC != INDEX_L3) {
    sizes[INDEX_L3].cs_cachep
    =
    kmem_cache_create(names[INDEX_L3].name,
    sizes[INDEX_L3].cs_size,
    ARCH_KMALLOC_MINALIGN,
    ARCH_KMALLOC_FLAGS
    |SLAB_PANIC,
    NULL);
    }

    初始化names和sizes这两个数组:

            slab_early_init = 0;

    while (sizes->cs_size != ULONG_MAX) {
    /* 所有的通用slab都是按照L1对齐的,这样做在SMP结构上时尤其有用的,使得更紧凑*/
    if (!sizes->cs_cachep) {
    sizes
    ->cs_cachep = kmem_cache_create(names->name,
    sizes
    ->cs_size,
    ARCH_KMALLOC_MINALIGN,
    ARCH_KMALLOC_FLAGS
    |SLAB_PANIC,
    NULL);
    }
    #ifdef CONFIG_ZONE_DMA
    sizes
    ->cs_dmacachep = kmem_cache_create(
    names
    ->name_dma,
    sizes
    ->cs_size,
    ARCH_KMALLOC_MINALIGN,
    ARCH_KMALLOC_FLAGS
    |SLAB_CACHE_DMA|SLAB_PANIC,
    NULL);
    #endif
    sizes
    ++;
    names
    ++;
    }

    接下来替换启动阶段使用的静态head arrays,也就是为每个cpu的本地缓存换上用kmalloc真正分配的内存:

            {
    struct array_cache *ptr;
    int this_cpu;
    /* 分配空间 */
    ptr
    = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
    /* 使cpu不能被中断 */
    slab_irq_disable(this_cpu);
    BUG_ON(cpu_cache_get(
    &cache_cache, this_cpu) != &initarray_cache.cache);
    /* 取得cpu的cache放在ptr中*/
    memcpy(ptr, cpu_cache_get(
    &cache_cache, this_cpu), sizeof(struct arraycache_init));
    spin_lock_init(
    &ptr->lock);
    /* 设置cache_cache中的array */
    cache_cache.array[this_cpu]
    = ptr;
    slab_irq_enable(this_cpu);
    ptr
    = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
    slab_irq_disable(this_cpu);
    BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu)
    != &initarray_generic.cache);
    memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu),
    sizeof(struct arraycache_init));

    spin_lock_init(
    &ptr->lock);
    /* 处理malloc的cpu缓存 */
    malloc_sizes[INDEX_AC].cs_cachep
    ->array[this_cpu] = ptr;
    slab_irq_enable(this_cpu);
    }

    处理kmem_list3:

            {
    int nid;
    /* 遍历所有状态,这里应该是“满”、“部分”和“全空”,初始化对应的链表 */
    for_each_online_node(nid) {
    init_list(
    &cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
    init_list(malloc_sizes[INDEX_AC].cs_cachep,
    &initkmem_list3[SIZE_AC + nid], nid);
    if (INDEX_AC != INDEX_L3) {
    init_list(malloc_sizes[INDEX_L3].cs_cachep,
    &initkmem_list3[SIZE_L3 + nid], nid);
    }
    }
    }

    把arrays的大小设置为真正的大小:

            {
    struct kmem_cache *cachep;
    mutex_lock(
    &cache_chain_mutex);
    list_for_each_entry(cachep,
    &cache_chain, next)
    if (enable_cpucache(cachep))
    BUG();
    mutex_unlock(
    &cache_chain_mutex);
    }

    最后通知其他cpu初始化:

            init_lock_keys();
    g_cpucache_up
    = FULL;
    register_cpu_notifier(
    &cpucache_notifier);
    }

    ------------------------

    个人理解,欢迎拍砖。

  • 相关阅读:
    原创:【微信小程序】发送消息模板教程(后台以PHP示例)
    【微信小程序】详解wx:if elif else的用法(搭配view、block)
    原创:微信小程序+WEB使用JS实现注册【60s】倒计时功能
    微信小程序的POST和GET请求方式的header区别
    什么是单例模式
    Spring bean的生命周期
    对Spring Bean了解一二
    匿名内部类了解一二
    Eclipse中如何查看使用的JDK版本
    什么是语法糖
  • 原文地址:https://www.cnblogs.com/ggzwtj/p/2135348.html
Copyright © 2011-2022 走看看