
    The implementation of the memory cache (kmem_cache) in the Linux kernel
     
    This document is copyleft by yfydz and released under the GPL. It may be freely copied
    and reposted; please keep it intact when reposting. Any commercial use is strictly forbidden.
    msn: yfydz_no1@hotmail.com
    Source: http://yfydz.cublog.cn

    1. Introduction

    kmem_cache is the fast memory-buffer interface provided by the Linux kernel. The memory
    blocks it manages must all be the same size: a block released through the interface is not
    really freed but kept as a cache, so the next allocation request can reuse it directly,
    skipping the usual initialization and release work. Allocation is therefore very fast, and
    the interface is typically used where large numbers of equal-sized blocks are needed, such
    as inodes, skbuff heads, and netfilter connections. kmalloc itself also allocates from
    kmem_cache; the current cache statistics can be read from /proc/slabinfo.
    The kernel code below is from version 2.6.19.2, mainly from mm/slab.c. The basic principle
    is much the same in 2.4 and 2.6, but the concrete implementation has changed quite a bit.

    2. slab and page

    Before introducing kmem_cache, the terms page and slab need defining. As is well known, the
    page is the kernel's basic unit of memory management and has a fixed size, 4K on x86
    machines. A slab is the concrete form that a kmem_cache's memory takes: depending on the
    size of the cache's objects, a slab spans from 1 page up to at most 32 (128/4) pages. If the
    object is smaller than one page, the slab holds multiple objects so as to use the space as
    fully as possible.
    struct slab {
    // list linkage
     struct list_head list;
    // offset of the unused (colouring) space
     unsigned long colouroff;
    // address of the actual memory buffer
     void *s_mem;  /* including colour offset */
    // number of objects in use in this slab
     unsigned int inuse; /* num of objs active in slab */
    // index of the first free object
     kmem_bufctl_t free;
     unsigned short nodeid;
    };
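
    The kmem_bufctl_t entries that follow struct slab in memory form an index-based free list
    over the slab's objects: free holds the index of the first free object, and entry i of the
    array holds the index of the next free object after object i. The user-space sketch below
    illustrates just this mechanism; the function names mirror the kernel's
    slab_get_obj()/slab_put_obj(), but the toy_slab layout and sizes are made up for illustration:

     #include <stdio.h>

     #define NUM_OBJS   4
     #define OBJ_SIZE   32
     #define BUFCTL_END ((unsigned int)~0U)    /* end marker, as in mm/slab.c */

     typedef unsigned int kmem_bufctl_t;

     struct toy_slab {
      unsigned int inuse;                /* objects in use */
      kmem_bufctl_t free;                /* index of the first free object */
      kmem_bufctl_t bufctl[NUM_OBJS];    /* next-free chain, one entry per object */
      char objs[NUM_OBJS][OBJ_SIZE];     /* the object memory (s_mem) */
     };

     static void slab_init(struct toy_slab *s)
     {
      unsigned int i;
      for (i = 0; i < NUM_OBJS; i++)     /* chain every object to the next */
       s->bufctl[i] = i + 1;
      s->bufctl[NUM_OBJS - 1] = BUFCTL_END;
      s->free = 0;
      s->inuse = 0;
     }

     /* like slab_get_obj(): pop the first free object off the chain */
     static void *slab_get_obj(struct toy_slab *s)
     {
      unsigned int idx;
      if (s->free == BUFCTL_END)
       return NULL;                      /* slab full, the caller must move on */
      idx = s->free;
      s->free = s->bufctl[idx];          /* advance the free chain */
      s->inuse++;
      return s->objs[idx];
     }

     /* like slab_put_obj(): push the object back onto the chain */
     static void slab_put_obj(struct toy_slab *s, void *obj)
     {
      unsigned int idx = ((char *)obj - &s->objs[0][0]) / OBJ_SIZE;
      s->bufctl[idx] = s->free;
      s->free = idx;
      s->inuse--;
     }

     int main(void)
     {
      struct toy_slab s;
      void *a, *b;
      slab_init(&s);
      a = slab_get_obj(&s);
      b = slab_get_obj(&s);
      printf("inuse=%u\n", s.inuse);     /* 2 */
      slab_put_obj(&s, a);
      slab_put_obj(&s, b);
      printf("inuse=%u\n", s.inuse);     /* 0 */
      return 0;
     }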
     
    3. Data structures

    The kmem_cache structure is not defined in a .h header; the header only carries a typedef
    for it, because code that uses a kmem_cache never needs to know its internals: the interface
    functions fully encapsulate the structure. This is a common way of doing OO programming in C.

    /* include/linux/slab.h */
    // only a type definition here
    typedef struct kmem_cache kmem_cache_t;

    /* mm/slab.c */
    // the complete definition is in the .c file

    /*
     * struct array_cache
     *
     * Purpose:
     * - LIFO ordering, to hand out cache-warm objects from _alloc
     * - reduce the number of linked list operations
     * - reduce spinlock operations
     *
     * The limit is stored in the per-cpu structure to reduce the data cache
     * footprint.
     *
     */
    // the per-CPU cache data
    struct array_cache {
     unsigned int avail;
     unsigned int limit;
     unsigned int batchcount;
     unsigned int touched;
     spinlock_t lock;
     void *entry[0]; /*
        * Must have this definition in here for the proper
        * alignment of array_cache. Also simplifies accessing
        * the entries.
        * [0] is for gcc 2.95. It should really be [].
        */
    };
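
    entry[] behaves as a per-CPU LIFO stack: allocation pops entry[--avail], and freeing pushes
    entry[avail++] while avail < limit, which is exactly what ____cache_alloc() and
    __cache_free() below do. A user-space sketch of this fast path; halving avail here merely
    stands in for cache_flusharray(), which in the real code returns batchcount objects to the
    slab lists:

     #include <stdio.h>

     #define AC_LIMIT 4

     /* simplified stand-in for struct array_cache */
     struct toy_array_cache {
      unsigned int avail;
      unsigned int limit;
      void *entry[AC_LIMIT];
     };

     /* fast-path alloc: pop the most recently freed (cache-warm) object */
     static void *ac_alloc(struct toy_array_cache *ac)
     {
      if (ac->avail)
       return ac->entry[--ac->avail];
      return NULL;    /* miss: the kernel would call cache_alloc_refill() here */
     }

     /* fast-path free: push, flushing part of the stack first when it is full */
     static void ac_free(struct toy_array_cache *ac, void *obj)
     {
      if (ac->avail >= ac->limit)
       ac->avail /= 2;           /* stand-in for cache_flusharray() */
      ac->entry[ac->avail++] = obj;
     }

     int main(void)
     {
      struct toy_array_cache ac = { 0, AC_LIMIT, { 0 } };
      int objs[5];
      int i;
      for (i = 0; i < 5; i++)
       ac_free(&ac, &objs[i]);           /* the 5th free triggers the flush */
      printf("avail=%u\n", ac.avail);    /* 3 */
      printf("%d\n", ac_alloc(&ac) == &objs[4]);   /* 1: LIFO, warmest first */
      return 0;
     }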

    /*
     * The slab lists for all objects.
     */
    // the slab lists managed by the cache
    struct kmem_list3 {
    // slabs on this list hold both objects in use and free objects
     struct list_head slabs_partial; /* partial list first, better asm code */
    // all objects in the slabs on this list are in use
     struct list_head slabs_full;
    // all objects in the slabs on this list are free
     struct list_head slabs_free;
    // number of free objects
     unsigned long free_objects;
    // free limit; above it, some objects should be released
     unsigned int free_limit;
     unsigned int colour_next; /* Per-node cache coloring */
     spinlock_t list_lock;
     struct array_cache *shared; /* shared per node */
     struct array_cache **alien; /* on other nodes */
     unsigned long next_reap; /* updated without locking */
     int free_touched;  /* updated without locking */
    };

    struct kmem_cache {
    /* 1) per-cpu data, touched during every alloc/free */
    // the per-CPU cache arrays
     struct array_cache *array[NR_CPUS];
    /* 2) Cache tunables. Protected by cache_chain_mutex */
    // number of objects handed to a CPU in one batch when it has no free objects
     unsigned int batchcount;
    // number of free objects allowed in the per-CPU pool before half of them are released to the global pool
     unsigned int limit;
     unsigned int shared;
    // the aligned size of each object
     unsigned int buffer_size;
    /* 3) touched by every alloc & free from the backend */
    // MAX_NUMNODES per-node cache lists; MAX_NUMNODES is set when the kernel is compiled
     struct kmem_list3 *nodelists[MAX_NUMNODES];
     unsigned int flags;  /* constant flags */
    // number of objects per slab
     unsigned int num;  /* # of objs per slab */
    /* 4) cache_grow/shrink */
     /* order of pgs per slab (2^n) */
    // the slab size in pages, as an order: each slab occupies 2^gfporder pages,
    // so 0 when the objects fit into a single 4K page, and 3 for a 32K slab (8 pages of 4K)
     unsigned int gfporder;
     /* force GFP flags, e.g. GFP_DMA */
     gfp_t gfpflags;
     size_t colour;   /* cache colouring range */
     unsigned int colour_off; /* colour offset */
     struct kmem_cache *slabp_cache;
     unsigned int slab_size;
     unsigned int dflags;  /* dynamic flags */
     /* constructor func */
    // constructor for the cache's objects
     void (*ctor) (void *, struct kmem_cache *, unsigned long);
     /* de-constructor func */
    // destructor for the cache's objects
     void (*dtor) (void *, struct kmem_cache *, unsigned long);
    /* 5) cache creation/removal */
    // name of the cache
     const char *name;
    // next entry in the cache chain
     struct list_head next;
    /* 6) statistics */
    #if STATS
     unsigned long num_active;
     unsigned long num_allocations;
     unsigned long high_mark;
     unsigned long grown;
     unsigned long reaped;
     unsigned long errors;
     unsigned long max_freeable;
     unsigned long node_allocs;
     unsigned long node_frees;
     unsigned long node_overflow;
     atomic_t allochit;
     atomic_t allocmiss;
     atomic_t freehit;
     atomic_t freemiss;
    #endif
    #if DEBUG
     /*
      * If debugging is enabled, then the allocator can add additional
      * fields and/or padding to every object. buffer_size contains the total
      * object size including these internal fields, the following two
      * variables contain the offset to the user object and its size.
      */
     int obj_offset;
     int obj_size;
    #endif
    };

    The kernel's cache descriptors are themselves managed by a cache, so a static cache
    structure is defined to serve as the head of the cache chain:
    /* internal cache of cache description objs */
    static struct kmem_cache cache_cache = {
     .batchcount = 1,
     .limit = BOOT_CPUCACHE_ENTRIES,
     .shared = 1,
     .buffer_size = sizeof(struct kmem_cache),
     .name = "kmem_cache",
    #if DEBUG
     .obj_size = sizeof(struct kmem_cache),
    #endif
    };
    /proc/slabinfo presents the basic information of this cache chain.

    The relationship between cache, slab and page can be roughly pictured as follows:

        cache <-------------------> cache <--------------------->cache
                                      |
                                      V
                                  kmem_list3
                                      |
                   +--------------------------------------+
                   |                  |                   |
                   V                  V                   V
               slabs_full       slabs_partial        slabs_free
                   |                  |                   |
                   V                  V                   V
                 slab               slab                 slab
                   |                  |                   |
                   V                  V                   V
                 page               page                 page
                   |                  |                   |
           +-------------+     +--------------+     +-------------+
           |             |     |              |     |             |
           V             V     V              V     V             V
        object  ...  object   object  ...  object object  ...  object
     
    4. Interface functions

    4.1 Basic usage

    To use kmem_cache, first create the cache with kmem_cache_create, e.g.:
    static kmem_cache_t *ip_conntrack_cachep __read_mostly;
     ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
                                             sizeof(struct ip_conntrack), 0,
                                             0, NULL, NULL);
    Object space is allocated with kmem_cache_alloc, e.g.:
     conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);

    Objects are freed with kmem_cache_free, e.g.:
     kmem_cache_free(ip_conntrack_cachep, conntrack);

    When the module exits, the cache is destroyed with kmem_cache_destroy, e.g.:
     kmem_cache_destroy(ip_conntrack_cachep);
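
    Put together, a minimal module using this interface on a 2.6-era kernel would look roughly
    like the sketch below. struct foo, foo_cachep and the foo_* helpers are made-up names for
    illustration, not kernel APIs, and error handling is reduced to the essentials:

     #include <linux/module.h>
     #include <linux/slab.h>

     struct foo {
      int id;
     };

     static kmem_cache_t *foo_cachep __read_mostly;

     static int __init foo_init(void)
     {
      /* name, object size, align, flags, ctor, dtor */
      foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                     0, 0, NULL, NULL);
      if (!foo_cachep)
       return -ENOMEM;
      return 0;
     }

     /* called on some hot path; GFP_ATOMIC because it must not sleep */
     static struct foo *foo_get(void)
     {
      return kmem_cache_alloc(foo_cachep, GFP_ATOMIC);
     }

     static void foo_put(struct foo *f)
     {
      kmem_cache_free(foo_cachep, f);
     }

     static void __exit foo_exit(void)
     {
      kmem_cache_destroy(foo_cachep);
     }

     module_init(foo_init);
     module_exit(foo_exit);
     MODULE_LICENSE("GPL");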
     
    4.2 Creating a cache: kmem_cache_create

    This function creates the kmem_cache structure. The cache name and the size of each object
    must be supplied; the remaining parameters may be 0 or NULL.
    The heart of the function is computing a suitable, aligned slab size for the requested
    object size.
    /* mm/slab.c */
    // name is the name of the cache
    // size is the size of the objects in the cache; normally all other parameters can be 0 or NULL
    // align: size is aligned to this value
    struct kmem_cache *
    kmem_cache_create (const char *name, size_t size, size_t align,
     unsigned long flags,
     void (*ctor)(void*, struct kmem_cache *, unsigned long),
     void (*dtor)(void*, struct kmem_cache *, unsigned long))
    {
     size_t left_over, slab_size, ralign;
     struct kmem_cache *cachep = NULL, *pc;
     /*
      * Sanity checks... these are all serious usage bugs.
      */
    // the cache name must not be empty, allocation must not happen in interrupt context,
    // and the object size must be neither too small nor too large;
    // a destructor without a constructor is also rejected
     if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
         (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
      printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
        name);
      BUG();
     }
     /*
      * Prevent CPUs from coming and going.
      * lock_cpu_hotplug() nests outside cache_chain_mutex
      */
     lock_cpu_hotplug();
    // lock the cache chain
     mutex_lock(&cache_chain_mutex);
    // walk the cache chain (a global list)
     list_for_each_entry(pc, &cache_chain, next) {
      mm_segment_t old_fs = get_fs();
      char tmp;
      int res;
      /*
       * This happens when the module gets unloaded and doesn't
       * destroy its slab cache and no-one else reuses the vmalloc
       * area of the module.  Print a warning.
       */
    // check that the cache is still valid; a module may have been unloaded
    // without destroying its cache
      set_fs(KERNEL_DS);
      res = __get_user(tmp, pc->name);
      set_fs(old_fs);
      if (res) {
       printk("SLAB: cache with size %d has lost its name\n",
              pc->buffer_size);
       continue;
      }
    // a cache with this name already exists: error out
      if (!strcmp(pc->name, name)) {
       printk("kmem_cache_create: duplicate cache %s\n", name);
       dump_stack();
       goto oops;
      }
     }
    // the DEBUG code can be ignored
    #if DEBUG
     WARN_ON(strchr(name, ' ')); /* It confuses parsers */
     if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
      /* No constructor, but inital state check requested */
      printk(KERN_ERR "%s: No con, but init state check "
             "requested - %s ", __FUNCTION__, name);
      flags &= ~SLAB_DEBUG_INITIAL;
     }
    #if FORCED_DEBUG
     /*
      * Enable redzoning and last user accounting, except for caches with
      * large objects, if the increased size would increase the object size
      * above the next power of two: caches with object sizes just above a
      * power of two have a significant amount of internal fragmentation.
      */
     if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
      flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
     if (!(flags & SLAB_DESTROY_BY_RCU))
      flags |= SLAB_POISON;
    #endif
     if (flags & SLAB_DESTROY_BY_RCU)
      BUG_ON(flags & SLAB_POISON);
    #endif
     if (flags & SLAB_DESTROY_BY_RCU)
      BUG_ON(dtor);
     /*
      * Always checks flags, a caller might be expecting debug support which
      * isn't available.
      */
     BUG_ON(flags & ~CREATE_MASK);
     /*
      * Check that size is in terms of words.  This is needed to avoid
      * unaligned accesses for some archs when redzoning is used, and makes
      * sure any on-slab bufctl's are also correctly aligned.
      */
    // first round the object size up to a multiple of BYTES_PER_WORD (4-byte alignment on 32-bit machines)
     if (size & (BYTES_PER_WORD - 1)) {
      size += (BYTES_PER_WORD - 1);
      size &= ~(BYTES_PER_WORD - 1);
     }
     /* calculate the final buffer alignment: */
    // the actual alignment is computed from the flags below
     /* 1) arch recommendation: can be overridden for debug */
     if (flags & SLAB_HWCACHE_ALIGN) {
    // align to the hardware cache line; alignments are powers of two
      /*
       * Default alignment: as specified by the arch code.  Except if
       * an object is really small, then squeeze multiple objects into
       * one cacheline.
       */
      ralign = cache_line_size();
      while (size <= ralign / 2)
       ralign /= 2;
     } else {
      ralign = BYTES_PER_WORD;
     }
     /*
      * Redzoning and user store require word alignment. Note this will be
      * overridden by architecture or caller mandated alignment if either
      * is greater than BYTES_PER_WORD.
      */
     if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
      ralign = BYTES_PER_WORD;
     /* 2) arch mandated alignment: disables debug if necessary */
     if (ralign < ARCH_SLAB_MINALIGN) {
      ralign = ARCH_SLAB_MINALIGN;
      if (ralign > BYTES_PER_WORD)
       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
     }
     /* 3) caller mandated alignment: disables debug if necessary */
     if (ralign < align) {
    // the alignment computed from the system is smaller than the requested one, so use the caller's value
      ralign = align;
      if (ralign > BYTES_PER_WORD)
       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
     }
     /*
      * 4) Store it.
      */
    // the final alignment value
     align = ralign;
     /* Get cache's description obj. */
    // allocate the cache structure itself and zero it; SLAB_KERNEL means the operation may sleep
     cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
     if (!cachep)
      goto oops;
    #if DEBUG
     cachep->obj_size = size;
     /*
      * Both debugging options require word-alignment which is calculated
      * into align above.
      */
     if (flags & SLAB_RED_ZONE) {
      /* add space for red zone words */
      cachep->obj_offset += BYTES_PER_WORD;
      size += 2 * BYTES_PER_WORD;
     }
     if (flags & SLAB_STORE_USER) {
      /* user store requires one word storage behind the end of
       * the real object.
       */
      size += BYTES_PER_WORD;
     }
    #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
     if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
         && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
      cachep->obj_offset += PAGE_SIZE - size;
      size = PAGE_SIZE;
     }
    #endif
    #endif
     /*
      * Determine if the slab management is 'on' or 'off' slab.
      * (bootstrapping cannot cope with offslab caches so don't do
      * it too early on.)
      */
     if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
    // if the object size is fairly large, set the CFLGS_OFF_SLAB flag
    // ((PAGE_SIZE >> 3) is 512 on x86)
      /*
       * Size is large, assume best to place the slab management obj
       * off-slab (should allow better packing of objs).
       */
      flags |= CFLGS_OFF_SLAB;
    // re-align the object size to the alignment computed above
     size = ALIGN(size, align);
    // compute the page order needed for a slab holding objects of this size; returns the leftover space per slab
     left_over = calculate_slab_order(cachep, size, align, flags);
     if (!cachep->num) {
    // cachep->num为每个slab中的对象数
    // 为0表示找不到合适的内存slab块大小
      printk("kmem_cache_create: couldn't create cache %s. ", name);
      kmem_cache_free(&cache_cache, cachep);
      cachep = NULL;
      goto oops;
     }
    // align the size of the slab management structure itself: the slab header (struct slab)
    // plus one control entry per object (cachep->num of them); kmem_bufctl_t is just an
    // unsigned int: typedef unsigned int kmem_bufctl_t
     slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
         + sizeof(struct slab), align);
     /*
      * If the slab has been placed off-slab, and we have enough space then
      * move it on-slab. This is at the expense of any extra colouring.
      */
    // the OFF_SLAB flag is set, but the leftover space is big enough for the slab structure
     if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
    // so move the slab management data into the leftover space
      flags &= ~CFLGS_OFF_SLAB;
      left_over -= slab_size;
     }
     if (flags & CFLGS_OFF_SLAB) {
      /* really off slab. No need for manual alignment */
      slab_size =
          cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
     }
    // fill in the basic fields of the cache
    // colour_off is derived from the size of a hardware L1 cache line
     cachep->colour_off = cache_line_size();
     /* Offset must be a multiple of the alignment. */
     if (cachep->colour_off < align)
      cachep->colour_off = align;
    // colour is the number of colour_off offsets usable within the leftover space,
    // i.e. how many whole L1 cache lines fit into it
     cachep->colour = left_over / cachep->colour_off;
    // size of the slab management part
     cachep->slab_size = slab_size;
     cachep->flags = flags;
     cachep->gfpflags = 0;
     if (flags & SLAB_CACHE_DMA)
      cachep->gfpflags |= GFP_DMA;
    // actual size of the memory buffer (the object)
     cachep->buffer_size = size;
     if (flags & CFLGS_OFF_SLAB) {
      cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
      /*
       * This is a possibility for one of the malloc_sizes caches.
       * But since we go off slab only for object size greater than
       * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
       * this should not happen at all.
       * But leave a BUG_ON for some lucky dude.
       */
      BUG_ON(!cachep->slabp_cache);
     }
     cachep->ctor = ctor;
     cachep->dtor = dtor;
     cachep->name = name;
    // set up each CPU's own cache data
     if (setup_cpu_cache(cachep)) {
      __kmem_cache_destroy(cachep);
      cachep = NULL;
      goto oops;
     }
     /* cache setup completed, link it into the list */
    // link the newly built cache into the cache chain
     list_add(&cachep->next, &cache_chain);
    oops:
     if (!cachep && (flags & SLAB_PANIC))
      panic("kmem_cache_create(): failed to create slab `%s' ",
            name);
     mutex_unlock(&cache_chain_mutex);
     unlock_cpu_hotplug();
     return cachep;
    }
    // the function is exported to kernel modules
    EXPORT_SYMBOL(kmem_cache_create);
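
    The size, order and colour arithmetic above can be replayed in user space. The sketch below
    assumes a 4K page, 4-byte words and a 32-byte L1 cache line (all machine-dependent), and its
    order search is a simplified stand-in for calculate_slab_order(): it ignores the on-slab
    management structure that the real cache_estimate() subtracts first:

     #include <stdio.h>

     #define PAGE_SIZE      4096
     #define BYTES_PER_WORD 4                      /* 32-bit machine */
     #define L1_CACHE_LINE  32                     /* assumed cache_line_size() */
     #define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))

     int main(void)
     {
      unsigned long size = 600;                    /* requested object size */
      unsigned long ralign, num, left_over, colour;
      unsigned int order = 0;

      /* 1) round up to word alignment, as kmem_cache_create() does first */
      size = ALIGN(size, BYTES_PER_WORD);          /* 600 stays 600 */

      /* 2) SLAB_HWCACHE_ALIGN: shrink the line while the object fits into
       *    half of it, then align the size to the result */
      ralign = L1_CACHE_LINE;
      while (size <= ralign / 2)
       ralign /= 2;
      size = ALIGN(size, ralign);                  /* 600 -> 608 */

      /* 3) smallest 2^order pages that hold at least one object */
      while ((PAGE_SIZE << order) / size == 0)
       order++;
      num = (PAGE_SIZE << order) / size;           /* objects per slab: 6 */
      left_over = (PAGE_SIZE << order) - num * size;   /* 448 */

      /* 4) colouring: how many whole cache-line offsets fit in the leftover */
      colour = left_over / L1_CACHE_LINE;          /* 14 */

      printf("size=%lu order=%u num=%lu left_over=%lu colour=%lu\n",
             size, order, num, left_over, colour);
      return 0;
     }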
     

    4.3 Allocating an object: kmem_cache_(z)alloc()

    There are two functions, kmem_cache_alloc() and kmem_cache_zalloc(); the latter merely adds
    a step that zeroes the allocated space. Both return a ready-to-use cache object.
    /* mm/slab.c */
    /**
     * kmem_cache_alloc - Allocate an object
     * @cachep: The cache to allocate from.
     * @flags: See kmalloc().
     *
     * Allocate an object from this cache.  The flags are only relevant
     * if the cache has no available objects.
     */
    void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
     return __cache_alloc(cachep, flags, __builtin_return_address(0));
    }
    EXPORT_SYMBOL(kmem_cache_alloc);
    /**
     * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
     * @cache: The cache to allocate from.
     * @flags: See kmalloc().
     *
     * Allocate an object from this cache and set the allocated memory to zero.
     * The flags are only relevant if the cache has no available objects.
     */
    void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
    {
     void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
     if (ret)
      memset(ret, 0, obj_size(cache));
     return ret;
    }
    EXPORT_SYMBOL(kmem_cache_zalloc);
    At their core, both functions call __cache_alloc() to do the allocation:
    // the two-underscore __cache_alloc
    // NUMA_BUILD is 1 when the kernel is configured with CONFIG_NUMA, 0 otherwise
    static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
          gfp_t flags, void *caller)
    {
     unsigned long save_flags;
     void *objp = NULL;
     cache_alloc_debugcheck_before(cachep, flags);
     local_irq_save(save_flags);
     if (unlikely(NUMA_BUILD &&
       current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
    // this path is rarely taken
      objp = alternate_node_alloc(cachep, flags);
     if (!objp)
    // the main path: allocation through the four-underscore ____cache_alloc
      objp = ____cache_alloc(cachep, flags);
     /*
      * We may just have run out of memory on the local node.
      * __cache_alloc_node() knows how to locate memory on other nodes
      */
      if (NUMA_BUILD && !objp)
       objp = __cache_alloc_node(cachep, flags, numa_node_id());
     local_irq_restore(save_flags);
    // without DEBUG this is effectively objp = objp, a no-op
     objp = cache_alloc_debugcheck_after(cachep, flags, objp,
             caller);
     prefetchw(objp);
     return objp;
    }

    // the heart of it is this four-underscore cache_alloc
    static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
     void *objp;
     struct array_cache *ac;
     check_irq_off();
    // the per-CPU cache array
     ac = cpu_cache_get(cachep);
     if (likely(ac->avail)) {
    // the per-CPU array has objects available: no new allocation needed, return a cached object
      STATS_INC_ALLOCHIT(cachep);
      ac->touched = 1;
      objp = ac->entry[--ac->avail];
     } else {
    // otherwise fetch new objects
      STATS_INC_ALLOCMISS(cachep);
      objp = cache_alloc_refill(cachep, flags);
     }
     return objp;
    }

    // refill the per-CPU array with objects
    static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
    {
     int batchcount;
     struct kmem_list3 *l3;
     struct array_cache *ac;
     int node;
    // map the CPU to its node
     node = numa_node_id();
     check_irq_off();
    // the per-CPU cache array
     ac = cpu_cache_get(cachep);
    retry:
    // number of objects fetched per batch; batching avoids doing this work on every request
     batchcount = ac->batchcount;
     if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
      /*
       * If there was little recent activity on this cache, then
       * perform only a partial refill.  Otherwise we could generate
       * refill bouncing.
       */
      batchcount = BATCHREFILL_LIMIT;
     }
    // the list3 of the node this CPU belongs to
     l3 = cachep->nodelists[node];
     BUG_ON(ac->avail > 0 || !l3);
     spin_lock(&l3->list_lock);
     /* See if we can refill from the shared array */
    // try to obtain objects from the shared array first
     if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
      goto alloc_done;
    // the batch loop
     while (batchcount > 0) {
      struct list_head *entry;
      struct slab *slabp;
      /* Get slab alloc is to come from. */
    // take an entry from the partially-used slab list
      entry = l3->slabs_partial.next;
      if (entry == &l3->slabs_partial) {
    // we are back at the list head, so the partial-slab list is exhausted;
    // look for space on the free-slab list instead
       l3->free_touched = 1;
       entry = l3->slabs_free.next;
       if (entry == &l3->slabs_free)
    // the free-slab list is exhausted too: the whole cache must grow
        goto must_grow;
      }
    // get the usable slab pointer
      slabp = list_entry(entry, struct slab, list);
      check_slabp(cachep, slabp);
      check_spinlock_acquired(cachep);
    // pull as many usable objects from this slab as the batch allows
      while (slabp->inuse < cachep->num && batchcount--) {
       STATS_INC_ALLOCED(cachep);
       STATS_INC_ACTIVE(cachep);
       STATS_SET_HIGH(cachep);
    // avail records the number of objects actually handed out
       ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
               node);
      }
      check_slabp(cachep, slabp);
      /* move slabp to correct slabp list: */
    // first unlink the slab from its current list
      list_del(&slabp->list);
    // depending on whether the slab's objects are used up, put it on the full or the partial list
      if (slabp->free == BUFCTL_END)
       list_add(&slabp->list, &l3->slabs_full);
      else
       list_add(&slabp->list, &l3->slabs_partial);
     }
    must_grow:
    // some objects have been handed out, so reduce the free-object count
     l3->free_objects -= ac->avail;
    alloc_done:
     spin_unlock(&l3->list_lock);
     if (unlikely(!ac->avail)) {
    // avail is 0: there were no objects to hand out, the cache must grow
      int x;
    // add memory to the cache, i.e. more slabs
      x = cache_grow(cachep, flags, node);
      /* cache_grow can reenable interrupts, then ac could change. */
      ac = cpu_cache_get(cachep);
      if (!x && ac->avail == 0) /* no objects in sight? abort */
       return NULL;
      if (!ac->avail)  /* objects refilled by interrupt? */
       goto retry;
     }
     ac->touched = 1;
    // return the object pointer
     return ac->entry[--ac->avail];
    }
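
    Stripped of locking, statistics and list handling, the refill logic above reduces to the
    control flow below. This is a compressed user-space sketch, not the real data structures:
    two flags stand in for the partial and free slab lists, and inuse counting stands in for
    slab_get_obj():

     #include <stdio.h>

     #define NUM_OBJS 4                 /* objects per slab (cachep->num) */
     #define BATCH    3                 /* ac->batchcount */

     struct toy_slab { unsigned int inuse; };

     /* stand-in for kmem_list3: one partial and one free slab at most */
     struct toy_node {
      struct toy_slab partial, free_slab;
      int have_partial, have_free;
     };

     /* drain the partial slab first, then the free one, up to BATCH objects;
      * with nothing left, the real code jumps to must_grow/cache_grow() */
     static int refill(struct toy_node *n, int *got)
     {
      int batch = BATCH;
      *got = 0;
      while (batch > 0) {
       struct toy_slab *s;
       if (n->have_partial)
        s = &n->partial;
       else if (n->have_free)
        s = &n->free_slab;
       else
        return -1;                      /* must grow the cache */
       while (s->inuse < NUM_OBJS && batch-- > 0) {
        s->inuse++;                     /* slab_get_obj() would hand out a pointer */
        (*got)++;
       }
       if (s->inuse == NUM_OBJS) {      /* exhausted: would move to slabs_full */
        if (s == &n->partial)
         n->have_partial = 0;
        else
         n->have_free = 0;
       }
      }
      return 0;
     }

     int main(void)
     {
      struct toy_node n = { { 2 }, { 0 }, 1, 1 };
      int got;
      refill(&n, &got);    /* 2 objects from the partial slab, 1 from the free one */
      printf("got=%d\n", got);          /* 3 */
      return 0;
     }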
     
    4.4 Freeing an object: kmem_cache_free

    This is not a true release: the object is merely put back into the cache's pool of free objects.
    /**
     * kmem_cache_free - Deallocate an object
     * @cachep: The cache the allocation was from.
     * @objp: The previously allocated object.
     *
     * Free an object which was previously allocated from this
     * cache.
     */
    // really just a wrapper around __cache_free()
    void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    {
     unsigned long flags;
     BUG_ON(virt_to_cache(objp) != cachep);
     local_irq_save(flags);
     __cache_free(cachep, objp);
     local_irq_restore(flags);
    }
    EXPORT_SYMBOL(kmem_cache_free);

    /*
     * Release an obj back to its cache. If the obj has a constructed state, it must
     * be in this state _before_ it is released.  Called with disabled ints.
     */
    static inline void __cache_free(struct kmem_cache *cachep, void *objp)
    {
     struct array_cache *ac = cpu_cache_get(cachep);
     check_irq_off();
     objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
     if (cache_free_alien(cachep, objp))
      return;
     if (likely(ac->avail < ac->limit)) {
    // the number of free objects is below the limit
      STATS_INC_FREEHIT(cachep);
    // simply append the object being freed to the free-object array
    // incrementing avail means one more object is available
      ac->entry[ac->avail++] = objp;
      return;
     } else {
    // the number of free objects has reached the limit
      STATS_INC_FREEMISS(cachep);
    // flush some objects back first
      cache_flusharray(cachep, ac);
    // then append the object being freed to the free-object array
    // incrementing avail means one more object is available
      ac->entry[ac->avail++] = objp;
     }
    }
     
    4.5 Destroying a cache

    This is normally called from a module's exit function during cleanup; if the code is built
    into the kernel, this function is basically never called:
    /**
     * kmem_cache_destroy - delete a cache
     * @cachep: the cache to destroy
     *
     * Remove a struct kmem_cache object from the slab cache.
     *
     * It is expected this function will be called by a module when it is
     * unloaded.  This will remove the cache completely, and avoid a duplicate
     * cache being allocated each time a module is loaded and unloaded, if the
     * module doesn't have persistent in-kernel storage across loads and unloads.
     *
     * The cache must be empty before calling this function.
     *
     * The caller must guarantee that noone will allocate memory from the cache
     * during the kmem_cache_destroy().
     */
    void kmem_cache_destroy(struct kmem_cache *cachep)
    {
     BUG_ON(!cachep || in_interrupt());
     /* Don't let CPUs to come and go */
     lock_cpu_hotplug();
     /* Find the cache in the chain of caches. */
     mutex_lock(&cache_chain_mutex);
     /*
      * the chain is never empty, cache_cache is never destroyed
      */
    // the first element, cache_cache, is static, so the chain is never empty
    // remove this cache from the cache chain
     list_del(&cachep->next);
     mutex_unlock(&cache_chain_mutex);
    // release as many of the cache's slab blocks as possible
     if (__cache_shrink(cachep)) {
      slab_error(cachep, "Can't free all objects");
      mutex_lock(&cache_chain_mutex);
      list_add(&cachep->next, &cache_chain);
      mutex_unlock(&cache_chain_mutex);
      unlock_cpu_hotplug();
      return;
     }
     if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
      synchronize_rcu();
    // free the cache
     __kmem_cache_destroy(cachep);
     unlock_cpu_hotplug();
    }
    EXPORT_SYMBOL(kmem_cache_destroy);

    // the function that actually destroys the cache
    static void __kmem_cache_destroy(struct kmem_cache *cachep)
    {
     int i;
     struct kmem_list3 *l3;
    // free the per-CPU arrays of the cache
     for_each_online_cpu(i)
         kfree(cachep->array[i]);
     /* NUMA: free the list3 structures */
    // free all the memory in the list3 structures
     for_each_online_node(i) {
      l3 = cachep->nodelists[i];
      if (l3) {
       kfree(l3->shared);
       free_alien_cache(l3->alien);
       kfree(l3);
      }
     }
    // free the cache itself
     kmem_cache_free(&cache_cache, cachep);
    }

    4.6 Shrinking a cache

    This function releases as many of the cache's slab blocks as possible; when a cache holds
    too much free space, some memory is given back for use by other parts of the kernel.

    /**
     * kmem_cache_shrink - Shrink a cache.
     * @cachep: The cache to shrink.
     *
     * Releases as many slabs as possible for a cache.
     * To help debugging, a zero exit status indicates all slabs were released.
     */
    // just a wrapper
    int kmem_cache_shrink(struct kmem_cache *cachep)
    {
     BUG_ON(!cachep || in_interrupt());
     return __cache_shrink(cachep);
    }
    EXPORT_SYMBOL(kmem_cache_shrink);
     
    static int __cache_shrink(struct kmem_cache *cachep)
    {
     int ret = 0, i = 0;
     struct kmem_list3 *l3;
    // drain the per-CPU data of the cache
     drain_cpu_caches(cachep);
     check_irq_on();
     for_each_online_node(i) {
    // drain each node's list3
      l3 = cachep->nodelists[i];
      if (!l3)
       continue;
    // release slabs from slabs_free
      drain_freelist(cachep, l3, l3->free_objects);
      ret += !list_empty(&l3->slabs_full) ||
       !list_empty(&l3->slabs_partial);
     }
     return (ret ? 1 : 0);
    }

    static void drain_cpu_caches(struct kmem_cache *cachep)
    {
     struct kmem_list3 *l3;
     int node;
     on_each_cpu(do_drain, cachep, 1, 1);
     check_irq_on();
     for_each_online_node(node) {
      l3 = cachep->nodelists[node];
      if (l3 && l3->alien)
    // drain the alien part of the cache's list3
       drain_alien_cache(cachep, l3->alien);
     }
     for_each_online_node(node) {
      l3 = cachep->nodelists[node];
      if (l3)
    // drain the shared array of the list3
       drain_array(cachep, l3, l3->shared, 1, node);
     }
    }
    /*
     * Remove slabs from the list of free slabs.
     * Specify the number of slabs to drain in tofree.
     *
     * Returns the actual number of slabs released.
     */
    static int drain_freelist(struct kmem_cache *cache,
       struct kmem_list3 *l3, int tofree)
    {
     struct list_head *p;
     int nr_freed;
     struct slab *slabp;
     nr_freed = 0;
    // release from the slabs_free list
     while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
      spin_lock_irq(&l3->list_lock);
      p = l3->slabs_free.prev;
      if (p == &l3->slabs_free) {
       spin_unlock_irq(&l3->list_lock);
       goto out;
      }
    // get the slab
      slabp = list_entry(p, struct slab, list);
    #if DEBUG
      BUG_ON(slabp->inuse);
    #endif
    // unlink the slab from the list
      list_del(&slabp->list);
      /*
       * Safe to drop the lock. The slab is no longer linked
       * to the cache.
       */
    // the free-object count drops by one slab's worth of objects
      l3->free_objects -= cache->num;
      spin_unlock_irq(&l3->list_lock);
    // destroy the slab
      slab_destroy(cache, slabp);
      nr_freed++;
     }
    out:
     return nr_freed;
    }
     
    4.7 kmalloc and kfree

    kmalloc is also implemented with caches; since the size requested from kmalloc varies, each
    allocation is served from a cache of matching object size:
    /* include/linux/slab.h */
    // note that kmalloc is defined in the header file
    static inline void *kmalloc(size_t size, gfp_t flags)
    {
     if (__builtin_constant_p(size)) {
    // the following finds the cache whose object size is the smallest one that is >= size
      int i = 0;
    #define CACHE(x) \
      if (size <= x) \
       goto found; \
      else \
       i++;
    #include "kmalloc_sizes.h"
    #undef CACHE
      {
       extern void __you_cannot_kmalloc_that_much(void);
       __you_cannot_kmalloc_that_much();
      }
    found:
    // the real allocation is done by kmem_cache_alloc, so this memory too comes from a cache
      return kmem_cache_alloc((flags & GFP_DMA) ?
       malloc_sizes[i].cs_dmacachep :
       malloc_sizes[i].cs_cachep, flags);
     }
    // this function too ends up allocating through __cache_alloc()
     return __kmalloc(size, flags);
    }

    // this is the content of kmalloc_sizes.h; it defines the object sizes available for CACHE
    // normally the maximum is 128K, which is therefore the largest amount kmalloc can allocate
    #if (PAGE_SIZE == 4096)
     CACHE(32)
    #endif
     CACHE(64)
    #if L1_CACHE_BYTES < 64
     CACHE(96)
    #endif
     CACHE(128)
    #if L1_CACHE_BYTES < 128
     CACHE(192)
    #endif
     CACHE(256)
     CACHE(512)
     CACHE(1024)
     CACHE(2048)
     CACHE(4096)
     CACHE(8192)
     CACHE(16384)
     CACHE(32768)
     CACHE(65536)
     CACHE(131072)
    #if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
     CACHE(262144)
    #endif
    #ifndef CONFIG_MMU
     CACHE(524288)
     CACHE(1048576)
    #ifdef CONFIG_LARGE_ALLOCS
     CACHE(2097152)
     CACHE(4194304)
     CACHE(8388608)
     CACHE(16777216)
     CACHE(33554432)
    #endif /* CONFIG_LARGE_ALLOCS */
    #endif /* CONFIG_MMU */
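
    The CACHE() expansion in kmalloc() above amounts to a linear scan of this ascending table.
    Below is a user-space sketch of the same lookup; the table here is the common 4K-page
    configuration with both small classes enabled (96 and 192 exist only when L1_CACHE_BYTES
    allows them):

     #include <stdio.h>

     static const unsigned long cache_sizes[] = {
      32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096,
      8192, 16384, 32768, 65536, 131072
     };
     #define NR_SIZES (sizeof(cache_sizes) / sizeof(cache_sizes[0]))

     /* what the expanded CACHE() chain computes: the index of the first
      * size class that is >= the requested size */
     static int kmalloc_index(unsigned long size)
     {
      unsigned int i;
      for (i = 0; i < NR_SIZES; i++)
       if (size <= cache_sizes[i])
        return i;
      return -1;    /* __you_cannot_kmalloc_that_much() */
     }

     int main(void)
     {
      printf("kmalloc(100)  -> size-%lu cache\n", cache_sizes[kmalloc_index(100)]);
      printf("kmalloc(4097) -> size-%lu cache\n", cache_sizes[kmalloc_index(4097)]);
      /* prints size-128 and size-8192 */
      return 0;
     }
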
    /* mm/slab.c */
    // kfree also calls __cache_free to release the space
    void kfree(const void *objp)
    {
     struct kmem_cache *c;
     unsigned long flags;
     if (unlikely(!objp))
      return;
     local_irq_save(flags);
     kfree_debugcheck(objp);
     c = virt_to_cache(objp);
     debug_check_no_locks_freed(objp, obj_size(c));
     __cache_free(c, (void *)objp);
     local_irq_restore(flags);
    }
    EXPORT_SYMBOL(kfree);
     
    5. Conclusion

    Caches make the handling of frequently created and destroyed objects much more efficient.
    This is also why /proc/meminfo normally shows little free memory on Linux: much of the
    memory is held in caches rather than really freed.