Kernel Source Analysis: The Process Address Space (based on 3.16-rc4)

    A process's address space is its virtual address space. When a process is created, the kernel gives it a linear (virtual) address space, and through page tables it maps the process's physical memory into that virtual address space. What the programmer sees are virtual addresses only; physical addresses are transparent to the programmer. When the program runs, the MMU hardware translates its virtual addresses into physical addresses, locates the instructions and data in memory, and the process's code executes. Below we analyze the data structures and operations related to the process address space.

    Data structures involved:

    1. The memory descriptor: struct mm_struct (include/linux/mm_types.h)

      1 struct mm_struct {
      2     struct vm_area_struct *mmap;        /* list of VMAs */
      3     struct rb_root mm_rb;
      4     u32 vmacache_seqnum;                   /* per-thread vmacache */
      5 #ifdef CONFIG_MMU
      6     unsigned long (*get_unmapped_area) (struct file *filp,
      7                 unsigned long addr, unsigned long len,
      8                 unsigned long pgoff, unsigned long flags);
      9 #endif
     10     unsigned long mmap_base;        /* base of mmap area */
     11     unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
     12     unsigned long task_size;        /* size of task vm space */
     13     unsigned long highest_vm_end;        /* highest vma end address */
     14     pgd_t * pgd;
     15     atomic_t mm_users;            /* How many users with user space? */
     16     atomic_t mm_count;            /* How many references to "struct mm_struct" (users count as 1) */
     17     atomic_long_t nr_ptes;            /* Page table pages */
     18     int map_count;                /* number of VMAs */
     19 
     20     spinlock_t page_table_lock;        /* Protects page tables and some counters */
     21     struct rw_semaphore mmap_sem;
     22 
     23     struct list_head mmlist;        /* List of maybe swapped mm's.    These are globally strung
     24                          * together off init_mm.mmlist, and are protected
     25                          * by mmlist_lock
     26                          */
     27 
     28 
     29     unsigned long hiwater_rss;    /* High-watermark of RSS usage */
     30     unsigned long hiwater_vm;    /* High-water virtual memory usage */
     31 
     32     unsigned long total_vm;        /* Total pages mapped */
     33     unsigned long locked_vm;    /* Pages that have PG_mlocked set */
     34     unsigned long pinned_vm;    /* Refcount permanently increased */
     35     unsigned long shared_vm;    /* Shared pages (files) */
     36     unsigned long exec_vm;        /* VM_EXEC & ~VM_WRITE */
     37     unsigned long stack_vm;        /* VM_GROWSUP/DOWN */
     38     unsigned long def_flags;
     39     unsigned long start_code, end_code, start_data, end_data;
     40     unsigned long start_brk, brk, start_stack;
     41     unsigned long arg_start, arg_end, env_start, env_end;
     42 
     43     unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
     44 
     45     /*
     46      * Special counters, in some configurations protected by the
     47      * page_table_lock, in other configurations by being atomic.
     48      */
     49     struct mm_rss_stat rss_stat;
     50 
     51     struct linux_binfmt *binfmt;
     52 
     53     cpumask_var_t cpu_vm_mask_var;
     54 
     55     /* Architecture-specific MM context */
     56     mm_context_t context;
     57 
     58     unsigned long flags; /* Must use atomic bitops to access the bits */
     59 
     60     struct core_state *core_state; /* coredumping support */
     61 #ifdef CONFIG_AIO
     62     spinlock_t            ioctx_lock;
     63     struct kioctx_table __rcu    *ioctx_table;
     64 #endif
     65 #ifdef CONFIG_MEMCG
     66     /*
     67      * "owner" points to a task that is regarded as the canonical
     68      * user/owner of this mm. All of the following must be true in
     69      * order for it to be changed:
     70      *
     71      * current == mm->owner
     72      * current->mm != mm
     73      * new_owner->mm == mm
     74      * new_owner->alloc_lock is held
     75      */
     76     struct task_struct __rcu *owner;
     77 #endif
     78 
     79     /* store ref to file /proc/<pid>/exe symlink points to */
     80     struct file *exe_file;
     81 #ifdef CONFIG_MMU_NOTIFIER
     82     struct mmu_notifier_mm *mmu_notifier_mm;
     83 #endif
     84 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
     85     pgtable_t pmd_huge_pte; /* protected by page_table_lock */
     86 #endif
     87 #ifdef CONFIG_CPUMASK_OFFSTACK
     88     struct cpumask cpumask_allocation;
     89 #endif
     90 #ifdef CONFIG_NUMA_BALANCING
     91     /*
     92      * numa_next_scan is the next time that the PTEs will be marked
     93      * pte_numa. NUMA hinting faults will gather statistics and migrate
     94      * pages to new nodes if necessary.
     95      */
     96     unsigned long numa_next_scan;
     97 
     98     /* Restart point for scanning and setting pte_numa */
     99     unsigned long numa_scan_offset;
    100 
    101     /* numa_scan_seq prevents two threads setting pte_numa */
    102     int numa_scan_seq;
    103 #endif
    104 #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
    105     /*
    106      * An operation with batched TLB flushing is going on. Anything that
    107      * can move process memory needs to flush the TLB when moving a
    108      * PROT_NONE or PROT_NUMA mapped page.
    109      */
    110     bool tlb_flush_pending;
    111 #endif
    112     struct uprobes_state uprobes_state;
    113 };

    Every process descriptor contains a memory descriptor that represents the process's virtual address space. The structure has many members; we introduce a few briefly here and analyze the rest as they come up. mmap on line 2 points to the first memory region (VMA, described below) of the address space; the kernel also organizes all regions of an address space into a red-black tree, whose root is mm_rb on line 3. Line 10 holds the base address of the mmap area, line 12 the size of the task's virtual address space, and line 13 the end address of the highest region. pgd on line 14 holds the address of the process's page global directory. Line 18 counts the regions in the address space. Memory descriptors are strung onto a doubly linked list through mmlist on line 23 (per the comment, the list of possibly swapped mm's, anchored at init_mm.mmlist). Line 39 holds the start and end addresses of the code segment and of the data segment, line 40 the start and end of the heap plus the start of the stack, and line 41 the start and end addresses of the command-line arguments and of the environment variables. Kernel threads have no memory regions at all. A small sketch of walking the region list follows.
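
    As a quick illustration of how these members work together, here is a minimal sketch (assuming a 3.16-era kernel-module context; the helper name is ours, not code from the kernel) that walks a process's VMA list under the mmap_sem read lock:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* walk every VMA of an address space; mmap is the list head and
       mmap_sem protects the list and tree against concurrent changes */
    static void dump_vmas(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            down_read(&mm->mmap_sem);
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    printk(KERN_INFO "vma %lx-%lx flags %lx\n",
                           vma->vm_start, vma->vm_end, vma->vm_flags);
            up_read(&mm->mmap_sem);
    }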

    2. The memory region: struct vm_area_struct (include/linux/mm_types.h)

     1 struct vm_area_struct {
     2     /* The first cache line has the info for VMA tree walking. */
     3 
     4     unsigned long vm_start;        /* Our start address within vm_mm. */
     5     unsigned long vm_end;        /* The first byte after our end address
     6                        within vm_mm. */
     7 
     8     /* linked list of VM areas per task, sorted by address */
     9     struct vm_area_struct *vm_next, *vm_prev;
    10 
    11     struct rb_node vm_rb;
    12 
    13     /*
    14      * Largest free memory gap in bytes to the left of this VMA.
    15      * Either between this VMA and vma->vm_prev, or between one of the
    16      * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
    17      * get_unmapped_area find a free area of the right size.
    18      */
    19     unsigned long rb_subtree_gap;
    20 
    21     /* Second cache line starts here. */
    22 
    23     struct mm_struct *vm_mm;    /* The address space we belong to. */
    24     pgprot_t vm_page_prot;        /* Access permissions of this VMA. */
    25     unsigned long vm_flags;        /* Flags, see mm.h. */
    26 
    27     /*
    28      * For areas with an address space and backing store,
    29      * linkage into the address_space->i_mmap interval tree, or
    30      * linkage of vma in the address_space->i_mmap_nonlinear list.
    31      */
    32     union {
    33         struct {
    34             struct rb_node rb;
    35             unsigned long rb_subtree_last;
    36         } linear;
    37         struct list_head nonlinear;
    38     } shared;
    39 
    40     /*
    41      * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
    42      * list, after a COW of one of the file pages.    A MAP_SHARED vma
    43      * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
    44      * or brk vma (with NULL file) can only be in an anon_vma list.
    45      */
    46     struct list_head anon_vma_chain; /* Serialized by mmap_sem &
    47                       * page_table_lock */
    48     struct anon_vma *anon_vma;    /* Serialized by page_table_lock */
    49 
    50     /* Function pointers to deal with this struct. */
    51     const struct vm_operations_struct *vm_ops;
    52 
    53     /* Information about our backing store: */
    54     unsigned long vm_pgoff;        /* Offset (within vm_file) in PAGE_SIZE
    55                        units, *not* PAGE_CACHE_SIZE */
    56     struct file * vm_file;        /* File we map to (can be NULL). */
    57     void * vm_private_data;        /* was vm_pte (shared mem) */
    58 
    59 #ifndef CONFIG_MMU
    60     struct vm_region *vm_region;    /* NOMMU mapping region */
    61 #endif
    62 #ifdef CONFIG_NUMA
    63     struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
    64 #endif
    65 };

    As shown above, each memory descriptor holds a number of memory regions organized into a red-black tree. The memory descriptor represents the whole virtual address space, while each region describes one area of it, such as the code segment or the data segment, so a memory descriptor necessarily owns several regions. A large process (a database server, say) can have a great many regions, which makes searching them expensive; organizing them as a red-black tree keeps lookups fast. Briefly, the members: lines 4-5 hold the region's start and end virtual addresses. All regions of a memory descriptor are kept in two structures at once, a doubly linked list and a red-black tree, as lines 9 and 11 show; the two complement each other, the list being simple and quick to scan while the region count is small, the tree taking over once the count grows large and the list falls short. Line 23 points back at the memory descriptor the region belongs to. Line 24 holds the region's access permissions, and line 25 the flags for the pages it covers (whether they are readable, writable, executable and so on); the flags are defined in include/linux/mm.h. These permissions and flags are used to set the corresponding permissions and flags in the page tables. A trivial way to see the regions of a live process follows.
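
    User space can see exactly this structure: each line of /proc/self/maps is rendered from one vm_area_struct (vm_start-vm_end, permissions derived from vm_flags, vm_pgoff, device, inode and the vm_file path). A trivial program to dump it:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/self/maps", "r");
            char line[512];

            if (!f)
                    return 1;
            /* one line per vm_area_struct of the current process */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }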

    3. The red-black tree node: struct rb_node (include/linux/rbtree.h)

    1 struct rb_node {
    2     unsigned long  __rb_parent_color;
    3     struct rb_node *rb_right;
    4     struct rb_node *rb_left;
    5 } __attribute__((aligned(sizeof(long))));

    __rb_parent_color on line 2 implicitly points to the node's parent, with the lowest bit recording the node's color (red or black). On every architecture, pointers are at least four-byte aligned, so the lowest two bits of the parent pointer are always zero and are free to carry the color information. Before the parent pointer is used, those bits must be masked off; the rb_parent helper does the extraction. Lines 3-4 point to the node's left and right children.
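
    For reference, the masking is a one-liner in include/linux/rbtree.h:

    /* clearing the low two bits of __rb_parent_color recovers the
       aligned parent pointer; bit 0 alone holds the node's color */
    #define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))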

    Functions that operate on memory regions:

    1. find_vma (mm/mmap.c)

     1 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
     2 {
     3     struct rb_node *rb_node;
     4     struct vm_area_struct *vma;
     5 
     6     /* Check the cache first. */
     7     vma = vmacache_find(mm, addr);
     8     if (likely(vma))
     9         return vma;
    10 
    11     rb_node = mm->mm_rb.rb_node;
    12     vma = NULL;
    13 
    14     while (rb_node) {
    15         struct vm_area_struct *tmp;
    16 
    17         tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
    18 
    19         if (tmp->vm_end > addr) {
    20             vma = tmp;
    21             if (tmp->vm_start <= addr)
    22                 break;
    23             rb_node = rb_node->rb_left;
    24         } else
    25             rb_node = rb_node->rb_right;
    26     }
    27 
    28     if (vma)
    29         vmacache_update(addr, vma);
    30     return vma;
    31 }

    This function looks up the memory region containing address addr and returns a pointer to it; the region returned either contains addr or is the first region lying entirely above addr. It takes two arguments, a memory descriptor and a virtual address. The per-task vmacache[] array holds a few recently used regions, and by locality of reference a new address is very likely to fall into one of them, so line 7 first probes the cache with vmacache_find and returns immediately on a hit. Otherwise the red-black tree is searched: line 11 loads the tree root into rb_node, and lines 14-26 walk down the tree. Line 17 converts the current rb_node into its enclosing region, tmp. Line 19: if tmp->vm_end > addr, then addr lies either in this region or in one before it, so tmp is recorded as the best candidate and the walk descends left; otherwise it descends right. Line 21: if additionally tmp->vm_start <= addr, then addr lies inside tmp and the loop ends. (Because the tree is sorted by address, everything in a node's left subtree ends below it and everything in its right subtree begins above it.) Line 29 updates the vmacache with the result, and line 30 returns the region found, or NULL if the tree is empty or every region ends at or below addr.
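
    Because the returned region may lie entirely above addr, callers that want to know whether addr actually belongs to a mapping must add one more check. A minimal kernel-side sketch (the helper name is ours; the caller must hold mm->mmap_sem for reading):

    #include <linux/mm.h>

    /* true if addr is covered by some VMA of mm; find_vma may also
       return a VMA lying entirely above addr, hence the vm_start test */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma = find_vma(mm, addr);

            return vma && vma->vm_start <= addr;
    }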

    2. find_vma_intersection (include/linux/mm.h)

    1 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
    2 {
    3     struct vm_area_struct * vma = find_vma(mm,start_addr);
    4 
    5     if (vma && end_addr <= vma->vm_start)
    6         vma = NULL;
    7     return vma;
    8 }

    This function returns the first memory region that intersects the range [start_addr, end_addr): find_vma fetches the first region ending above start_addr, and line 5 discards it if it starts at or beyond end_addr, in which case the range intersects nothing and NULL is returned.
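
    A typical use is validating a fixed-address request before mapping it. A minimal sketch (the helper name is ours; the caller holds mm->mmap_sem):

    #include <linux/mm.h>

    /* nonzero if no part of [start, start + len) belongs to a VMA */
    static int range_is_free(struct mm_struct *mm, unsigned long start,
                             unsigned long len)
    {
            return find_vma_intersection(mm, start, start + len) == NULL;
    }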

    3. get_unmapped_area (mm/mmap.c)

     1 unsigned long
     2 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
     3         unsigned long pgoff, unsigned long flags)
     4 {
     5     unsigned long (*get_area)(struct file *, unsigned long,
     6                   unsigned long, unsigned long, unsigned long);
     7 
     8     unsigned long error = arch_mmap_check(addr, len, flags);
     9     if (error)
    10         return error;
    11 
    12     /* Careful about overflows.. */
    13     if (len > TASK_SIZE)
    14         return -ENOMEM;
    15 
    16     get_area = current->mm->get_unmapped_area;
    17     if (file && file->f_op->get_unmapped_area)
    18         get_area = file->f_op->get_unmapped_area;
    19     addr = get_area(file, addr, len, pgoff, flags);
    20     if (IS_ERR_VALUE(addr))
    21         return addr;
    22 
    23     if (addr > TASK_SIZE - len)
    24         return -ENOMEM;
    25     if (addr & ~PAGE_MASK)
    26         return -EINVAL;
    27 
    28     addr = arch_rebalance_pgtables(addr, len);
    29     error = security_mmap_addr(addr);
    30     return error ? error : addr;
    31 }

    This function looks for a free address range of length len, ideally starting at addr, in the gaps between existing memory regions. When the lookup is on behalf of a file mapping, file is non-NULL and line 18 points get_area at the file's own get_unmapped_area method; for an anonymous mapping, line 16 points it at the memory descriptor's get_unmapped_area. Line 19 invokes whichever was chosen. We will not discuss the file variant, only the memory descriptor's, which exists in two versions: arch_get_unmapped_area, which searches upward from low addresses, and arch_get_unmapped_area_topdown, which searches downward starting below the user-mode stack; which one is installed depends on the mmap layout in use. Here is arch_get_unmapped_area (mm/mmap.c):

     1 unsigned long
     2 arch_get_unmapped_area(struct file *filp, unsigned long addr,
     3         unsigned long len, unsigned long pgoff, unsigned long flags)
     4 {
     5     struct mm_struct *mm = current->mm;
     6     struct vm_area_struct *vma;
     7     struct vm_unmapped_area_info info;
     8 
     9     if (len > TASK_SIZE - mmap_min_addr)
    10         return -ENOMEM;
    11 
    12     if (flags & MAP_FIXED)
    13         return addr;
    14 
    15     if (addr) {
    16         addr = PAGE_ALIGN(addr);
    17         vma = find_vma(mm, addr);
    18         if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
    19             (!vma || addr + len <= vma->vm_start))
    20             return addr;
    21     }
    22 
    23     info.flags = 0;
    24     info.length = len;
    25     info.low_limit = mm->mmap_base;
    26     info.high_limit = TASK_SIZE;
    27     info.align_mask = 0;
    28     return vm_unmapped_area(&info);
    29 }

    Line 12: with MAP_FIXED the requested address is returned as-is. If a hint address was supplied (line 15), line 16 rounds it up to a page boundary (a multiple of 4KB on x86) and line 17 looks it up with find_vma. The region found may contain addr (meaning the hint is unusable), may lie above addr, or may be NULL; only the latter two mean that a free range of length len might start at addr. Line 18 accepts the hint when addr + len fits below the user address space limit TASK_SIZE, addr is at or above mmap_min_addr, and either no region was found (nothing is mapped at or above addr, so the range is free) or the region found starts above addr and addr + len does not reach into it. Otherwise no suitable range starts at addr, and line 28 searches the whole address space for a range of length len, starting from mm->mmap_base. A user-space illustration of the hint behavior comes first, then vm_unmapped_area.
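
    The hint handling is easy to observe from user space: an address passed to mmap without MAP_FIXED is exactly the addr that arch_get_unmapped_area validates, honored when the range is free and silently replaced otherwise. A minimal sketch (the hint value is arbitrary and assumes an x86-64 address space):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *hint = (void *)0x700000000000UL; /* likely-free guess */
            void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            printf("hint %p -> got %p\n", hint, p);
            return munmap(p, 4096);
    }

    When the hint cannot be used, the search falls through to vm_unmapped_area (include/linux/mm.h):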

    1 static inline unsigned long
    2 vm_unmapped_area(struct vm_unmapped_area_info *info)
    3 {
    4     if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
    5         return unmapped_area(info);
    6     else
    7         return unmapped_area_topdown(info);
    8 }

    This function dispatches to unmapped_area or unmapped_area_topdown to locate a free range. We analyze the former; the latter is symmetric. The code:

      1 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
      2 {
      3     /*
      4      * We implement the search by looking for an rbtree node that
      5      * immediately follows a suitable gap. That is,
      6      * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
      7      * - gap_end   = vma->vm_start        >= info->low_limit  + length;
      8      * - gap_end - gap_start >= length
      9      */
     10 
     11     struct mm_struct *mm = current->mm;
     12     struct vm_area_struct *vma;
     13     unsigned long length, low_limit, high_limit, gap_start, gap_end;
     14 
     15     /* Adjust search length to account for worst case alignment overhead */
     16     length = info->length + info->align_mask;
     17     if (length < info->length)
     18         return -ENOMEM;
     19 
     20     /* Adjust search limits by the desired length */
     21     if (info->high_limit < length)
     22         return -ENOMEM;
     23     high_limit = info->high_limit - length;
     24 
     25     if (info->low_limit > high_limit)
     26         return -ENOMEM;
     27     low_limit = info->low_limit + length;
     28 
     29     /* Check if rbtree root looks promising */
     30     if (RB_EMPTY_ROOT(&mm->mm_rb))
     31         goto check_highest;
     32     vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
     33     if (vma->rb_subtree_gap < length)
     34         goto check_highest;
     35 
     36     while (true) {
     37         /* Visit left subtree if it looks promising */
     38         gap_end = vma->vm_start;
     39         if (gap_end >= low_limit && vma->vm_rb.rb_left) {
     40             struct vm_area_struct *left =
     41                 rb_entry(vma->vm_rb.rb_left,
     42                      struct vm_area_struct, vm_rb);
     43             if (left->rb_subtree_gap >= length) {
     44                 vma = left;
     45                 continue;
     46             }
     47         }
     48 
     49         gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
     50 check_current:
     51         /* Check if current node has a suitable gap */
     52         if (gap_start > high_limit)
     53             return -ENOMEM;
     54         if (gap_end >= low_limit && gap_end - gap_start >= length)
     55             goto found;
     56 
     57         /* Visit right subtree if it looks promising */
     58         if (vma->vm_rb.rb_right) {
     59             struct vm_area_struct *right =
     60                 rb_entry(vma->vm_rb.rb_right,
     61                      struct vm_area_struct, vm_rb);
     62             if (right->rb_subtree_gap >= length) {
     63                 vma = right;
     64                 continue;
     65             }
     66         }
     67 
     68         /* Go back up the rbtree to find next candidate node */
     69         while (true) {
     70             struct rb_node *prev = &vma->vm_rb;
     71             if (!rb_parent(prev))
     72                 goto check_highest;
     73             vma = rb_entry(rb_parent(prev),
     74                        struct vm_area_struct, vm_rb);
     75             if (prev == vma->vm_rb.rb_left) {
     76                 gap_start = vma->vm_prev->vm_end;
     77                 gap_end = vma->vm_start;
     78                 goto check_current;
     79             }
     80         }
     81     }
     82 
     83 check_highest:
     84     /* Check highest gap, which does not precede any rbtree node */
     85     gap_start = mm->highest_vm_end;
     86     gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
     87     if (gap_start > high_limit)
     88         return -ENOMEM;
     89 
     90 found:
     91     /* We found a suitable gap. Clip it with the original low_limit. */
     92     if (gap_start < info->low_limit)
     93         gap_start = info->low_limit;
     94 
     95     /* Adjust gap address to the desired alignment */
     96     gap_start += (info->align_offset - gap_start) & info->align_mask;
     97 
     98     VM_BUG_ON(gap_start + info->length > info->high_limit);
     99     VM_BUG_ON(gap_start + info->length > gap_end);
    100     return gap_start;
    101 }

    When the hint check fails, this function performs the real search. A "gap" here is the free space immediately preceding a region: besides the red-black tree, the regions also sit on a linked list sorted by ascending address, and a region's gap is the space between it and its list predecessor; rb_subtree_gap caches the size of the largest such gap anywhere in the subtree rooted at a node. Lines 16-18 pad the requested length for worst-case alignment overhead, and lines 21-27 fold the length into the search limits. Line 32 loads the region at the tree root into vma, and lines 33-34 check whether the tree contains any gap of sufficient size (the root's rb_subtree_gap); if not, control jumps to check_highest. Otherwise the main loop at line 36 begins. Line 38 provisionally takes the current region's start address as the gap's end (we are about to descend left, and everything in the left subtree lies below the current node). Lines 39-46: if the left subtree exists and its root's rb_subtree_gap is large enough (line 43), descend into it and continue the loop. When the left subtree is exhausted or unpromising, line 49 takes the end of the current region's list predecessor, or 0 if there is none, as the gap's start. Line 52 fails with -ENOMEM once gap_start passes high_limit, and line 54 jumps to found if the current gap is large enough. Otherwise lines 58-66 try the right subtree in the same way, descending when its rb_subtree_gap promises a big enough gap; the whole process resembles a recursion. If neither child helps, the inner loop at line 69 climbs back up: prev remembers the node just left, vma becomes its parent, and whenever the climb arrives from a left child (line 75), the parent itself is the next candidate in address order, so its list predecessor's end and its own start become the gap bounds and control jumps back to check_current; climbing off the root goes to check_highest. check_highest (lines 83-88) handles the gap above all regions, which the tree does not cover: mm->highest_vm_end, the end address of the highest region, is that gap's start, and if even it exceeds high_limit the call fails with -ENOMEM. At found, lines 92-93 clip the gap start up to the caller's low_limit, line 96 applies the requested alignment, and line 100 returns the start of the free range. If this is hard to follow, draw a red-black tree, note that an in-order walk of it yields exactly the linked-list order, assume some gaps between neighboring regions, and trace the algorithm by hand; it is intricate but elegant. The short program below performs the list-scanning version of the same search.
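
    To make the gap bookkeeping concrete, here is a user-space analogue (our own simplification, not kernel code) that answers the same question by scanning the sorted region list directly; the kernel gets the identical answer in O(log n) by steering the tree walk with rb_subtree_gap:

    #include <stdio.h>

    struct region { unsigned long start, end; }; /* a sorted "VMA list" */

    /* lowest gap of at least `length` between low_limit and high_limit */
    static unsigned long find_gap(const struct region *r, int n,
                                  unsigned long length,
                                  unsigned long low_limit,
                                  unsigned long high_limit)
    {
            unsigned long gap_start = low_limit;
            int i;

            for (i = 0; i <= n; i++) {
                    unsigned long gap_end = (i < n) ? r[i].start : high_limit;

                    if (gap_end > gap_start && gap_end - gap_start >= length)
                            return gap_start;
                    if (i < n && r[i].end > gap_start)
                            gap_start = r[i].end;
            }
            return 0;                       /* -ENOMEM in the kernel */
    }

    int main(void)
    {
            struct region vmas[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };

            /* the 0x3000-0x5000 gap (0x2000 bytes) is too small for
               0x3000 bytes, so the gap above the last region wins:
               prints 0x6000 */
            printf("0x%lx\n", find_gap(vmas, 2, 0x3000, 0x1000, 0x10000));
            return 0;
    }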

    4. insert_vm_struct (mm/mmap.c)

     1 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
     2 {
     3     struct vm_area_struct *prev;
     4     struct rb_node **rb_link, *rb_parent;
     5 
     6     /*
     7      * The vm_pgoff of a purely anonymous vma should be irrelevant
     8      * until its first write fault, when page's anon_vma and index
     9      * are set.  But now set the vm_pgoff it will almost certainly
    10      * end up with (unless mremap moves it elsewhere before that
    11      * first wfault), so /proc/pid/maps tells a consistent story.
    12      *
    13      * By setting it to reflect the virtual start address of the
    14      * vma, merges and splits can happen in a seamless way, just
    15      * using the existing file pgoff checks and manipulations.
    16      * Similarly in do_mmap_pgoff and in do_brk.
    17      */
    18     if (!vma->vm_file) {
    19         BUG_ON(vma->anon_vma);
    20         vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
    21     }
    22     if (find_vma_links(mm, vma->vm_start, vma->vm_end,
    23                &prev, &rb_link, &rb_parent))
    24         return -ENOMEM;
    25     if ((vma->vm_flags & VM_ACCOUNT) &&
    26          security_vm_enough_memory_mm(mm, vma_pages(vma)))
    27         return -ENOMEM;
    28 
    29     vma_link(mm, vma, prev, rb_link, rb_parent);
    30     return 0;
    31 }

     This function inserts a memory region into the region red-black tree and linked list; mm is the memory descriptor owning the tree and vma the region to insert. Lines 18-21: if the region is not a file mapping, derive its vm_pgoff from its start address. Line 22 finds the insertion point with find_vma_links, analyzed below, and returns -ENOMEM if the range overlaps an existing region. Lines 25-27: if the region's pages are charged against the commit limit (VM_ACCOUNT), check that this much memory may still be committed, and fail with -ENOMEM if not. Line 29 links vma into the tree and list, analyzed shortly. First, find_vma_links (mm/mmap.c):

     1 static int find_vma_links(struct mm_struct *mm, unsigned long addr,
     2         unsigned long end, struct vm_area_struct **pprev,
     3         struct rb_node ***rb_link, struct rb_node **rb_parent)
     4 {
     5     struct rb_node **__rb_link, *__rb_parent, *rb_prev;
     6 
     7     __rb_link = &mm->mm_rb.rb_node;
     8     rb_prev = __rb_parent = NULL;
     9 
    10     while (*__rb_link) {
    11         struct vm_area_struct *vma_tmp;
    12 
    13         __rb_parent = *__rb_link;
    14         vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
    15 
    16         if (vma_tmp->vm_end > addr) {
    17             /* Fail if an existing vma overlaps the area */
    18             if (vma_tmp->vm_start < end)
    19                 return -ENOMEM;
    20             __rb_link = &__rb_parent->rb_left;
    21         } else {
    22             rb_prev = __rb_parent;
    23             __rb_link = &__rb_parent->rb_right;
    24         }
    25     }
    26 
    27     *pprev = NULL;
    28     if (rb_prev)
    29         *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
    30     *rb_link = __rb_link;
    31     *rb_parent = __rb_parent;
    32     return 0;
    33 }

    This function finds the proper insertion point for a new region. Line 7 points __rb_link at the tree's root link, and lines 10-25 walk down the tree looking for the spot. Line 13 records the current node in __rb_parent, and line 14 converts it into its region, vma_tmp. Lines 16-20: if addr lies below the current region's end, the insertion point is in the left subtree; line 18 first verifies that the new range [addr, end) does not reach into the current region, returning -ENOMEM on overlap, and then the walk descends left. Lines 21-23: otherwise addr is at or above the current region's end, so the walk remembers the current node in rb_prev (the closest predecessor seen so far) and descends right. The walk stops at a NULL child link; that link is where the new node will hang. Line 29 passes the predecessor region back through *pprev. Line 30 returns through *rb_link the address of that NULL child pointer (the double pointer itself is non-NULL, but the link it designates is NULL), and line 31 returns the future parent node through *rb_parent, which is in general a different node from *pprev. A small user-space analogue follows, then vma_link.
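
    A user-space analogue of the descent (our own simplification) shows the overlap check and the returned NULL link:

    #include <stddef.h>
    #include <stdio.h>

    struct node { unsigned long start, end; struct node *left, *right; };

    /* descend exactly like find_vma_links: left when the new range ends
       before the current one, right otherwise, failing on overlap;
       returns the NULL link where the new node would be attached */
    static struct node **find_link(struct node **root, unsigned long start,
                                   unsigned long end)
    {
            struct node **link = root;

            while (*link) {
                    struct node *n = *link;

                    if (n->end > start) {
                            if (n->start < end)
                                    return NULL;    /* overlap: -ENOMEM */
                            link = &n->left;
                    } else {
                            link = &n->right;
                    }
            }
            return link;
    }

    int main(void)
    {
            struct node a = { 0x1000, 0x2000, NULL, NULL };
            struct node *root = &a;

            /* [0x2000, 0x3000) does not overlap a; [0x1800, 0x3000) does */
            printf("%s\n", find_link(&root, 0x2000, 0x3000) ? "ok" : "overlap");
            printf("%s\n", find_link(&root, 0x1800, 0x3000) ? "ok" : "overlap");
            return 0;
    }

    Back in insert_vm_struct, the link found this way is handed to vma_link (mm/mmap.c):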

     1 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
     2             struct vm_area_struct *prev, struct rb_node **rb_link,
     3             struct rb_node *rb_parent)
     4 {
     5     struct address_space *mapping = NULL;
     6 
     7     if (vma->vm_file) {
     8         mapping = vma->vm_file->f_mapping;
     9         mutex_lock(&mapping->i_mmap_mutex);
    10     }
    11 
    12     __vma_link(mm, vma, prev, rb_link, rb_parent);
    13     __vma_link_file(vma);
    14 
    15     if (mapping)
    16         mutex_unlock(&mapping->i_mmap_mutex);
    17 
    18     mm->map_count++;
    19     validate_mm(mm);
    20 }

    This function inserts vma into the red-black tree and the list via __vma_link on line 12, taking the file mapping's i_mmap_mutex around the insertion when the region maps a file (lines 7-10). Line 18 increments the mm's region count. Next, __vma_link (mm/mmap.c):

    1 static void
    2 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
    3     struct vm_area_struct *prev, struct rb_node **rb_link,
    4     struct rb_node *rb_parent)
    5 {
    6     __vma_link_list(mm, vma, prev, rb_parent);
    7     __vma_link_rb(mm, vma, rb_link, rb_parent);
    8 }

    As lines 6-7 show, vma is inserted into the linked list and into the red-black tree respectively. The first helper is a plain doubly-linked-list insertion we will skip; let us go straight to __vma_link_rb (mm/mmap.c):

     1 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
     2         struct rb_node **rb_link, struct rb_node *rb_parent)
     3 {
     4     /* Update tracking information for the gap following the new vma. */
     5     if (vma->vm_next)
     6         vma_gap_update(vma->vm_next);
     7     else
     8         mm->highest_vm_end = vma->vm_end;
     9 
    10     /*
    11      * vma->vm_prev wasn't known when we followed the rbtree to find the
    12      * correct insertion point for that vma. As a result, we could not
    13      * update the vma vm_rb parents rb_subtree_gap values on the way down.
    14      * So, we first insert the vma with a zero rb_subtree_gap value
    15      * (to be consistent with what we did on the way down), and then
    16      * immediately update the gap to the correct value. Finally we
    17      * rebalance the rbtree after all augmented values have been set.
    18      */
    19     rb_link_node(&vma->vm_rb, rb_parent, rb_link);
    20     vma->rb_subtree_gap = 0;
    21     vma_gap_update(vma);
    22     vma_rb_insert(vma, &mm->mm_rb);
    23 }

    Lines 5-6: if the newly inserted region has a successor on the linked list, the successor's rb_subtree_gap is updated (the gap preceding it has just changed); otherwise line 8 stores the new region's end address into the memory descriptor's highest_vm_end (an empty vm_next means the new region is the last one, so its end address is by definition the highest). Line 19 attaches the new node at the insertion point found earlier (only the first step of a red-black insertion; recoloring and rebalancing follow) and initializes it. Line 20 zeroes the new region's rb_subtree_gap and line 21 computes the real value, propagating it up the tree. What interests us most is vma_rb_insert on line 22, which completes the insertion into the tree; a sketch of the generic insertion idiom follows, and then its code.
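
    For comparison, the plain (non-augmented) insertion idiom documented in Documentation/rbtree.txt has the same find-the-link-then-rebalance shape; this is a minimal sketch, assuming a tree keyed by a single unsigned long:

    #include <linux/rbtree.h>

    struct mynode {
            unsigned long key;
            struct rb_node rb;
    };

    static void my_insert(struct rb_root *root, struct mynode *new)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;

            /* find the NULL link where the new node belongs, exactly as
               find_vma_links does for VMAs */
            while (*link) {
                    struct mynode *n = rb_entry(*link, struct mynode, rb);

                    parent = *link;
                    link = new->key < n->key ? &(*link)->rb_left
                                             : &(*link)->rb_right;
            }
            rb_link_node(&new->rb, parent, link);  /* attach as a red leaf */
            rb_insert_color(&new->rb, root);       /* recolor and rotate */
    }

    __vma_link_rb follows the same shape but calls rb_insert_augmented with vma_gap_callbacks so that rb_subtree_gap stays valid across rotations. Here is vma_rb_insert (mm/mmap.c):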

    1 static inline void vma_rb_insert(struct vm_area_struct *vma,
    2                  struct rb_root *root)
    3 {
    4     /* All rb_subtree_gap values must be consistent prior to insertion */
    5     validate_mm_rb(root, NULL);
    6 
    7     rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
    8 }

    The function on line 5 walks the whole tree and checks every node for consistency; on error the kernel emits an oops. The function on line 7 calls __rb_insert_augmented, which in turn calls __rb_insert, so we look at __rb_insert directly (lib/rbtree.c):

      1 static __always_inline void
      2 __rb_insert(struct rb_node *node, struct rb_root *root,
      3         void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
      4 {
      5     struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
      6 
      7     while (true) {
      8         /*
      9          * Loop invariant: node is red
     10          *
     11          * If there is a black parent, we are done.
     12          * Otherwise, take some corrective action as we don't
     13          * want a red root or two consecutive red nodes.
     14          */
     15         if (!parent) {
     16             rb_set_parent_color(node, NULL, RB_BLACK);
     17             break;
     18         } else if (rb_is_black(parent))
     19             break;
     20 
     21         gparent = rb_red_parent(parent);
     22 
     23         tmp = gparent->rb_right;
     24         if (parent != tmp) {    /* parent == gparent->rb_left */
     25             if (tmp && rb_is_red(tmp)) {
     26                 /*
     27                  * Case 1 - color flips
     28                  *
     29                  *       G            g
     30                  *      / \          / \
     31                  *     p   u  -->   P   U
     32                  *    /            /
     33                  *   n            N
     34                  *
     35                  * However, since g's parent might be red, and
     36                  * 4) does not allow this, we need to recurse
     37                  * at g.
     38                  */
     39                 rb_set_parent_color(tmp, gparent, RB_BLACK);
     40                 rb_set_parent_color(parent, gparent, RB_BLACK);
     41                 node = gparent;
     42                 parent = rb_parent(node);
     43                 rb_set_parent_color(node, parent, RB_RED);
     44                 continue;
     45             }
     46 
     47             tmp = parent->rb_right;
     48             if (node == tmp) {
     49                 /*
     50                  * Case 2 - left rotate at parent
     51                  *
     52                  *      G             G
     53                  *     / \           / \
     54                  *    p   U  -->    n   U
     55                  *     \           /
     56                  *      n         p
     57                  *
     58                  * This still leaves us in violation of 4), the
     59                  * continuation into Case 3 will fix that.
     60                  */
     61                 parent->rb_right = tmp = node->rb_left;
     62                 node->rb_left = parent;
     63                 if (tmp)
     64                     rb_set_parent_color(tmp, parent,
     65                                 RB_BLACK);
     66                 rb_set_parent_color(parent, node, RB_RED);
     67                 augment_rotate(parent, node);
     68                 parent = node;
     69                 tmp = node->rb_right;
     70             }
     71 
     72             /*
     73              * Case 3 - right rotate at gparent
     74              *
     75              *        G           P
     76              *       / \         / \
     77              *      p   U  -->  n   g
     78              *     /                 \
     79              *    n                   U
     80              */
     81             gparent->rb_left = tmp;  /* == parent->rb_right */
     82             parent->rb_right = gparent;
     83             if (tmp)
     84                 rb_set_parent_color(tmp, gparent, RB_BLACK);
     85             __rb_rotate_set_parents(gparent, parent, root, RB_RED);
     86             augment_rotate(gparent, parent);
     87             break;
     88         } else {
     89             tmp = gparent->rb_left;
     90             if (tmp && rb_is_red(tmp)) {
     91                 /* Case 1 - color flips */
     92                 rb_set_parent_color(tmp, gparent, RB_BLACK);
     93                 rb_set_parent_color(parent, gparent, RB_BLACK);
     94                 node = gparent;
     95                 parent = rb_parent(node);
     96                 rb_set_parent_color(node, parent, RB_RED);
     97                 continue;
     98             }
     99 
    100             tmp = parent->rb_left;
    101             if (node == tmp) {
    102                 /* Case 2 - right rotate at parent */
    103                 parent->rb_left = tmp = node->rb_right;
    104                 node->rb_right = parent;
    105                 if (tmp)
    106                     rb_set_parent_color(tmp, parent,
    107                                 RB_BLACK);
    108                 rb_set_parent_color(parent, node, RB_RED);
    109                 augment_rotate(parent, node);
    110                 parent = node;
    111                 tmp = node->rb_left;
    112             }
    113 
    114             /* Case 3 - left rotate at gparent */
    115             gparent->rb_right = tmp;  /* == parent->rb_left */
    116             parent->rb_left = gparent;
    117             if (tmp)
    118                 rb_set_parent_color(tmp, gparent, RB_BLACK);
    119             __rb_rotate_set_parents(gparent, parent, root, RB_RED);
    120             augment_rotate(gparent, parent);
    121             break;
    122         }
    123     }
    124 }

    This is the core red-black-tree insertion routine; every rbtree user in the kernel eventually lands here, so it is the heart of our analysis. Line 5 loads node's parent pointer (the full word, color bit included) into parent. Lines 15-17: if parent is NULL, node is the root; color it black and stop. Lines 18-19: if the parent exists and is black, nothing is violated and we stop. Otherwise the parent is red, and line 21 loads the grandparent into gparent. Line 23 points tmp at the grandparent's right child, which is either node's parent or its uncle.

    If tmp is the uncle (line 24, i.e. the parent is a left child), the if body runs. Case 1, color flips (lines 25-45): if the uncle is red (the parent already being red), lines 39-40 color the uncle and the parent black and line 43 colors the grandparent red; since the grandparent's own parent may also be red, the loop continues from the grandparent. If there is no uncle, or it is black, line 47 points tmp at the parent's right child, which is either node itself or node's sibling. Case 2, a left rotation at the parent (lines 48-70), applies when node is the parent's right child: node and its parent swap roles, the parent becoming node's left child and node's former left child becoming the parent's right child. In code, line 61 hangs node's left child under the parent as its right child (tmp now points at it), line 62 makes the parent node's left child, lines 63-65 color the transplanted child black if it exists, line 66 colors the former parent red, and lines 68-69 rename parent and tmp so that the following case finishes the job. Case 3, a right rotation at the grandparent (lines 81-87), applies whenever node's parent is the grandparent's left child (including as the continuation of case 2): the parent takes the grandparent's place, the grandparent becomes the parent's right child, and the parent's former right child becomes the grandparent's left child. Line 81 transplants tmp as the grandparent's left child, line 82 hooks the grandparent under the parent, lines 83-84 color tmp black if present, and line 85 (__rb_rotate_set_parents) gives the parent the grandparent's former color, colors the grandparent red, and splices the parent into the grandparent's old position, completing the rotation.

    If instead the parent is the grandparent's right child (the else branch from line 88), the three mirrored cases apply. Case 1', color flips (lines 89-97): tmp points at the uncle, the grandparent's left child, and if that uncle exists and is red, lines 92-93 color the parent and uncle black, line 96 colors the grandparent red, and the loop continues. Case 2', a right rotation at the parent (lines 100-112), applies when node is the parent's left child: line 103 hangs node's right child under the parent as its left child (tmp now points at it), line 104 makes the parent node's right child, lines 105-107 color the transplanted child black if it exists, line 108 colors the former parent red, and lines 110-111 rename parent and tmp. Case 3', a left rotation at the grandparent (lines 115-121), applies whenever node's parent is the grandparent's right child: line 115 transplants tmp as the grandparent's right child, line 116 hooks the grandparent under the parent as its left child, lines 117-118 color tmp black if present, and line 119 recolors and re-parents exactly as in case 3. Cases 1'-3' are perfectly symmetric to cases 1-3; working a few insertions through a tree on paper is the quickest way to internalize the flips and rotations.
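
    One final detail: the augment_rotate callback invoked at every rotation above is how augmented trees keep per-subtree data correct. For the VMA tree, mm/mmap.c (3.16) generates the callback set with the RB_DECLARE_CALLBACKS macro; roughly (quoted from memory, so treat as approximate):

    /* generates vma_gap_callbacks, whose rotate hook recomputes
       rb_subtree_gap via vma_compute_subtree_gap() whenever nodes are
       rotated, keeping the gap search correct */
    RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct,
                         vm_rb, unsigned long, rb_subtree_gap,
                         vma_compute_subtree_gap)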

    5. do_mmap_pgoff (mm/mmap.c)

      1 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
      2             unsigned long len, unsigned long prot,
      3             unsigned long flags, unsigned long pgoff,
      4             unsigned long *populate)
      5 {
      6     struct mm_struct * mm = current->mm;
      7     vm_flags_t vm_flags;
      8 
      9     *populate = 0;
     10 
     11     /*
     12      * Does the application expect PROT_READ to imply PROT_EXEC?
     13      *
     14      * (the exception is when the underlying filesystem is noexec
     15      *  mounted, in which case we dont add PROT_EXEC.)
     16      */
     17     if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
     18         if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
     19             prot |= PROT_EXEC;
     20 
     21     if (!len)
     22         return -EINVAL;
     23 
     24     if (!(flags & MAP_FIXED))
     25         addr = round_hint_to_min(addr);
     26 
     27     /* Careful about overflows.. */
     28     len = PAGE_ALIGN(len);
     29     if (!len)
     30         return -ENOMEM;
     31 
     32     /* offset overflow? */
     33     if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
     34                return -EOVERFLOW;
     35 
     36     /* Too many mappings? */
     37     if (mm->map_count > sysctl_max_map_count)
     38         return -ENOMEM;
     39 
     40     /* Obtain the address to map to. we verify (or select) it and ensure
     41      * that it represents a valid section of the address space.
     42      */
     43     addr = get_unmapped_area(file, addr, len, pgoff, flags);
     44     if (addr & ~PAGE_MASK)
     45         return addr;
     46 
     47     /* Do simple checking here so the lower-level routines won't have
     48      * to. we assume access permissions have been handled by the open
     49      * of the memory object, so we don't do any here.
     50      */
     51     vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
     52             mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
     53 
     54     if (flags & MAP_LOCKED)
     55         if (!can_do_mlock())
     56             return -EPERM;
     57 
     58     if (mlock_future_check(mm, vm_flags, len))
     59         return -EAGAIN;
     60 
     61     if (file) {
     62         struct inode *inode = file_inode(file);
     63 
     64         switch (flags & MAP_TYPE) {
     65         case MAP_SHARED:
     66             if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
     67                 return -EACCES;
     68 
     69             /*
     70              * Make sure we don't allow writing to an append-only
     71              * file..
     72              */
     73             if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
     74                 return -EACCES;
     75 
     76             /*
     77              * Make sure there are no mandatory locks on the file.
     78              */
     79             if (locks_verify_locked(file))
     80                 return -EAGAIN;
     81 
     82             vm_flags |= VM_SHARED | VM_MAYSHARE;
     83             if (!(file->f_mode & FMODE_WRITE))
     84                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
     85 
     86             /* fall through */
     87         case MAP_PRIVATE:
     88             if (!(file->f_mode & FMODE_READ))
     89                 return -EACCES;
     90             if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
     91                 if (vm_flags & VM_EXEC)
     92                     return -EPERM;
     93                 vm_flags &= ~VM_MAYEXEC;
     94             }
     95 
     96             if (!file->f_op->mmap)
     97                 return -ENODEV;
     98             if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
     99                 return -EINVAL;
    100             break;
    101 
    102         default:
    103             return -EINVAL;
    104         }
    105     } else {
    106         switch (flags & MAP_TYPE) {
    107         case MAP_SHARED:
    108             if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
    109                 return -EINVAL;
    110             /*
    111              * Ignore pgoff.
    112              */
    113             pgoff = 0;
    114             vm_flags |= VM_SHARED | VM_MAYSHARE;
    115             break;
    116         case MAP_PRIVATE:
    117             /*
    118              * Set pgoff according to addr for anon_vma.
    119              */
    120             pgoff = addr >> PAGE_SHIFT;
    121             break;
    122         default:
    123             return -EINVAL;
    124         }
    125     }
    126 
    127     /*
    128      * Set 'VM_NORESERVE' if we should not account for the
    129      * memory use of this mapping.
    130      */
    131     if (flags & MAP_NORESERVE) {
    132         /* We honor MAP_NORESERVE if allowed to overcommit */
    133         if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
    134             vm_flags |= VM_NORESERVE;
    135 
    136         /* hugetlb applies strict overcommit unless MAP_NORESERVE */
    137         if (file && is_file_hugepages(file))
    138             vm_flags |= VM_NORESERVE;
    139     }
    140 
    141     addr = mmap_region(file, addr, len, vm_flags, pgoff);
    142     if (!IS_ERR_VALUE(addr) &&
    143         ((vm_flags & VM_LOCKED) ||
    144          (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
    145         *populate = len;
    146     return addr;
    147 }

    This function allocates a memory region and establishes the mapping. (The same file contains do_brk, a similar but simpler routine used for anonymous mappings.) Lines 17-19 check whether the process's personality demands that readability imply executability and, if so (and the backing filesystem is not mounted noexec), add PROT_EXEC. Lines 33-34 check that pgoff plus the length in pages does not overflow, returning -EOVERFLOW if it would. Lines 37-38 enforce the per-mm limit on the number of mappings. Line 43 obtains a free address range with get_unmapped_area, analyzed above. Lines 51-52 compute the region's vm_flags (read, write, execute and so on) from the protection bits, the mapping flags and the mm's defaults. Lines 61-125 handle the file-backed and anonymous cases: depending on whether the mapping is shared or private, they set flags such as VM_SHARED and VM_MAYSHARE and reject invalid combinations. Lines 131-139 set VM_NORESERVE when the mapping's memory use should not be accounted. Line 141 calls mmap_region, which allocates the new region structure and tries to merge it with its neighbors, analyzed next. Lines 142-145 ask the caller to pre-fault the range when VM_LOCKED is set or MAP_POPULATE was passed without MAP_NONBLOCK, and line 146 returns the new region's start address. A short user-space example follows, then mmap_region itself.
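
    From user space, all of this sits behind the mmap system call. A minimal sketch (assuming /etc/hostname exists and is non-empty) exercising the file-backed path, with MAP_POPULATE making do_mmap_pgoff report the whole length back through *populate so the pages are faulted in immediately:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/etc/hostname", O_RDONLY);
            char *p;

            if (fd < 0)
                    return 1;
            p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_POPULATE,
                     fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            printf("%.32s\n", p);   /* read through the new mapping */
            munmap(p, 4096);
            return close(fd);
    }

    Here is mmap_region (mm/mmap.c):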

      1 unsigned long mmap_region(struct file *file, unsigned long addr,
      2         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
      3 {
      4     struct mm_struct *mm = current->mm;
      5     struct vm_area_struct *vma, *prev;
      6     int error;
      7     struct rb_node **rb_link, *rb_parent;
      8     unsigned long charged = 0;
      9 
     10     /* Check against address space limit. */
     11     if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
     12         unsigned long nr_pages;
     13 
     14         /*
     15          * MAP_FIXED may remove pages of mappings that intersects with
     16          * requested mapping. Account for the pages it would unmap.
     17          */
     18         if (!(vm_flags & MAP_FIXED))
     19             return -ENOMEM;
     20 
     21         nr_pages = count_vma_pages_range(mm, addr, addr + len);
     22 
     23         if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
     24             return -ENOMEM;
     25     }
     26 
     27     /* Clear old maps */
     28     error = -ENOMEM;
     29 munmap_back:
     30     if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
     31         if (do_munmap(mm, addr, len))
     32             return -ENOMEM;
     33         goto munmap_back;
     34     }
     35 
     36     /*
     37      * Private writable mapping: check memory availability
     38      */
     39     if (accountable_mapping(file, vm_flags)) {
     40         charged = len >> PAGE_SHIFT;
     41         if (security_vm_enough_memory_mm(mm, charged))
     42             return -ENOMEM;
     43         vm_flags |= VM_ACCOUNT;
     44     }
     45 
     46     /*
     47      * Can we just expand an old mapping?
     48      */
     49     vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
     50     if (vma)
     51         goto out;
     52 
     53     /*
     54      * Determine the object being mapped and call the appropriate
     55      * specific mapper. the address has already been validated, but
     56      * not unmapped, but the maps are removed from the list.
     57      */
     58     vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
     59     if (!vma) {
     60         error = -ENOMEM;
     61         goto unacct_error;
     62     }
     63 
     64     vma->vm_mm = mm;
     65     vma->vm_start = addr;
     66     vma->vm_end = addr + len;
     67     vma->vm_flags = vm_flags;
     68     vma->vm_page_prot = vm_get_page_prot(vm_flags);
     69     vma->vm_pgoff = pgoff;
     70     INIT_LIST_HEAD(&vma->anon_vma_chain);
     71 
     72     if (file) {
     73         if (vm_flags & VM_DENYWRITE) {
     74             error = deny_write_access(file);
     75             if (error)
     76                 goto free_vma;
     77         }
     78         vma->vm_file = get_file(file);
     79         error = file->f_op->mmap(file, vma);
     80         if (error)
     81             goto unmap_and_free_vma;
     82 
     83         /* Can addr have changed??
     84          *
     85          * Answer: Yes, several device drivers can do it in their
     86          *         f_op->mmap method. -DaveM
     87          * Bug: If addr is changed, prev, rb_link, rb_parent should
     88          *      be updated for vma_link()
     89          */
     90         WARN_ON_ONCE(addr != vma->vm_start);
     91 
     92         addr = vma->vm_start;
     93         vm_flags = vma->vm_flags;
     94     } else if (vm_flags & VM_SHARED) {
     95         error = shmem_zero_setup(vma);
     96         if (error)
     97             goto free_vma;
     98     }
     99 
    100     if (vma_wants_writenotify(vma)) {
    101         pgprot_t pprot = vma->vm_page_prot;
    102 
    103         /* Can vma->vm_page_prot have changed??
    104          *
    105          * Answer: Yes, drivers may have changed it in their
    106          *         f_op->mmap method.
    107          *
    108          * Ensures that vmas marked as uncached stay that way.
    109          */
    110         vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
    111         if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
    112             vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    113     }
    114 
    115     vma_link(mm, vma, prev, rb_link, rb_parent);
    116     /* Once vma denies write, undo our temporary denial count */
    117     if (vm_flags & VM_DENYWRITE)
    118         allow_write_access(file);
    119     file = vma->vm_file;
    120 out:
    121     perf_event_mmap(vma);
    122 
    123     vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
    124     if (vm_flags & VM_LOCKED) {
    125         if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
    126                     vma == get_gate_vma(current->mm)))
    127             mm->locked_vm += (len >> PAGE_SHIFT);
    128         else
    129             vma->vm_flags &= ~VM_LOCKED;
    130     }
    131 
    132     if (file)
    133         uprobe_mmap(vma);
    134 
    135     /*
    136      * New (or expanded) vma always get soft dirty status.
    137      * Otherwise user-space soft-dirty page tracker won't
    138      * be able to distinguish situation when vma area unmapped,
    139      * then new mapped in-place (which must be aimed as
    140      * a completely new data area).
    141      */
    142     vma->vm_flags |= VM_SOFTDIRTY;
    143 
    144     return addr;
    145 
    146 unmap_and_free_vma:
    147     if (vm_flags & VM_DENYWRITE)
    148         allow_write_access(file);
    149     vma->vm_file = NULL;
    150     fput(file);
    151 
    152     /* Undo any partial mapping done by a device driver. */
    153     unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
    154     charged = 0;
    155 free_vma:
    156     kmem_cache_free(vm_area_cachep, vma);
    157 unacct_error:
    158     if (charged)
    159         vm_unacct_memory(charged);
    160     return error;
    161 }

    Line 30: find_vma_links locates the insertion point for the new region (analyzed above); if it fails, existing mappings overlap the requested range, so the if body removes them with do_munmap and retries. Line 49 tries to merge the new range with the regions adjoining it (their vm_flags must match exactly); lines 50-51 jump to out on success. Otherwise line 58 allocates a fresh vm_area_struct for the range, and lines 64-70 initialize its members. Lines 72-98 complete the setup for the two special cases of a file-backed region and an anonymous shared region. Line 115 links the new region into the red-black tree and the list. Lines 124-130: if the region is locked, its page count is added to mm->locked_vm, unless it is a special region, in which case VM_LOCKED is simply cleared. Line 142 marks the new region soft-dirty, and line 144 returns its start virtual address. The merging on line 49 is easy to observe from user space, as the sketch below shows.
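
    vma_merge is also invoked on the mprotect path, so flipping a middle page's protection and then restoring it makes the region count (one line per region in /proc/self/maps) go up and back down. A minimal sketch, assuming the counts are not perturbed too much by allocator noise:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int count_maps(void)
    {
            FILE *f = fopen("/proc/self/maps", "r");
            int c = 0, ch;

            if (!f)
                    return -1;
            while ((ch = fgetc(f)) != EOF)
                    if (ch == '\n')
                            c++;        /* one line per VMA */
            fclose(f);
            return c;
    }

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            printf("one region:   %d VMAs\n", count_maps());

            /* a different protection on the middle page forces a split:
               vm_flags no longer match, so vma_merge cannot coalesce */
            mprotect(p + pg, pg, PROT_READ);
            printf("split region: %d VMAs\n", count_maps());

            /* identical flags again: vma_merge coalesces the three
               pieces back into a single vm_area_struct */
            mprotect(p + pg, pg, PROT_READ | PROT_WRITE);
            printf("merged again: %d VMAs\n", count_maps());
            return 0;
    }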

    6. do_munmap (mm/mmap.c)

     1 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
     2 {
     3     unsigned long end;
     4     struct vm_area_struct *vma, *prev, *last;
     5 
     6     if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
     7         return -EINVAL;
     8 
     9     if ((len = PAGE_ALIGN(len)) == 0)
    10         return -EINVAL;
    11 
    12     /* Find the first overlapping VMA */
    13     vma = find_vma(mm, start);
    14     if (!vma)
    15         return 0;
    16     prev = vma->vm_prev;
    17     /* we have  start < vma->vm_end  */
    18 
    19     /* if it doesn't overlap, we have nothing.. */
    20     end = start + len;
    21     if (vma->vm_start >= end)
    22         return 0;
    23 
    24     /*
    25      * If we need to split any vma, do it now to save pain later.
    26      *
    27      * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
    28      * unmapped vm_area_struct will remain in use: so lower split_vma
    29      * places tmp vma above, and higher split_vma places tmp vma below.
    30      */
    31     if (start > vma->vm_start) {
    32         int error;
    33 
    34         /*
    35          * Make sure that map_count on return from munmap() will
    36          * not exceed its limit; but let map_count go just above
    37          * its limit temporarily, to help free resources as expected.
    38          */
    39         if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
    40             return -ENOMEM;
    41 
    42         error = __split_vma(mm, vma, start, 0);
    43         if (error)
    44             return error;
    45         prev = vma;
    46     }
    47 
    48     /* Does it split the last one? */
    49     last = find_vma(mm, end);
    50     if (last && end > last->vm_start) {
    51         int error = __split_vma(mm, last, end, 1);
    52         if (error)
    53             return error;
    54     }
    55     vma = prev? prev->vm_next: mm->mmap;
    56 
    57     /*
    58      * unlock any mlock()ed ranges before detaching vmas
    59      */
    60     if (mm->locked_vm) {
    61         struct vm_area_struct *tmp = vma;
    62         while (tmp && tmp->vm_start < end) {
    63             if (tmp->vm_flags & VM_LOCKED) {
    64                 mm->locked_vm -= vma_pages(tmp);
    65                 munlock_vma_pages_all(tmp);
    66             }
    67             tmp = tmp->vm_next;
    68         }
    69     }
    70 
    71     /*
    72      * Remove the vma's, and unmap the actual pages
    73      */
    74     detach_vmas_to_be_unmapped(mm, vma, prev, end);
    75     unmap_region(mm, vma, prev, start, end);
    76 
    77     /* Fix up all other VM information */
    78     remove_vma_list(mm, vma);
    79 
    80     return 0;
    81 }

     This function removes a range of mappings. Line 13 finds the region containing start (find_vma was analyzed above); if there is none, there is nothing to unmap. Lines 20-22: if the range [start, start + len) lies entirely below the region found, it overlaps nothing and the call succeeds trivially (the range was never assigned to any region). Line 31: if start falls strictly inside the region found, the if body runs. Line 39 returns -ENOMEM when the unmap would carve the region into two pieces (end < vma->vm_end) while the mm is already at its region-count limit, since completing the operation would push the count past the limit. Line 42 calls __split_vma to split the region at start. Line 49 finds the region containing end, and lines 50-54 split it at end if end falls inside it. Line 55 points vma at the first region in the doomed range, which may contain start or lie just above it. Lines 60-69 unlock any mlock()ed regions in the range before they are detached. Line 74 detaches the affected regions from the mm, line 75 tears down their page-table entries, and line 78 frees the structures and fixes up the remaining accounting. A user-space demonstration follows.
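
    From user space, the two __split_vma calls are what make a partial munmap work. A minimal sketch: unmapping the middle page of a three-page anonymous mapping splits the region, leaving two independent VMAs:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            /* start > vm_start and end < vm_end: do_munmap calls
               __split_vma twice, then removes the middle piece */
            if (munmap(p + pg, pg))
                    return 1;
            p[0] = 1;                /* first page: still mapped */
            p[2 * pg] = 1;           /* third page: still mapped */
            /* touching p[pg] now would raise SIGSEGV */
            printf("hole punched at %p\n", (void *)(p + pg));
            return 0;
    }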

Original article: https://www.cnblogs.com/liangning/p/3907424.html