  • Linux Kernel Analysis: Process Address Space Management

    1. struct task_struct

       For every process, the operating system allocates a 4 KB or 8 KB region of memory as its kernel stack; a small thread_info structure sits at the low end of this region and points to the process descriptor, struct task_struct. The task_struct completely describes a running program: the files it has opened, the process's address space, pending signals, the process's state, and so on.

       

     The thread_info structure, which sits at the base of the kernel stack and points to the process descriptor:

    struct thread_info {
        struct task_struct    *task;        /* main task structure */
        unsigned long        flags;
        struct exec_domain    *exec_domain;    /* execution domain */
        int            preempt_count;    /* 0 => preemptable, <0 => BUG */
        __u32            cpu;        /* should always be 0 on m68k */
        struct restart_block    restart_block;
    };
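     The kernel stack and thread_info share a single 4 KB or 8 KB block (union thread_union in the kernel). Below is a simplified sketch of the 2.6-era x86-32 arrangement; the names are real kernel identifiers, but the code is a rendering, not the exact source:

    /* The kernel stack and thread_info occupy the same THREAD_SIZE
     * block: thread_info sits at the lowest addresses, and the stack
     * grows down from the top of the block toward it. */
    union thread_union {
        struct thread_info thread_info;
        unsigned long stack[THREAD_SIZE / sizeof(long)];
    };

    /* x86-32 style lookup: mask off the low bits of the stack pointer
     * to find the start of the block, which is the thread_info. */
    static inline struct thread_info *current_thread_info(void)
    {
        unsigned long sp;
        asm("movl %%esp, %0" : "=r"(sp));
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }

     The task_struct is then reachable as current_thread_info()->task, which is what the kernel's current macro ultimately resolves to on this architecture.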

       task_struct:

    struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
        atomic_t usage;
        unsigned int flags;    /* per process flags, defined below */
        unsigned int ptrace;

        int lock_depth;        /* BKL lock depth */

    #ifdef CONFIG_SMP
    #ifdef __ARCH_WANT_UNLOCKED_CTXSW
        int oncpu;
    #endif
    #endif
        int load_weight;    /* for niceness load balancing purposes */
        int prio, static_prio, normal_prio;
        struct list_head run_list;
        struct prio_array *array;

        unsigned short ioprio;
    #ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
    #endif
        unsigned long sleep_avg;
        unsigned long long timestamp, last_ran;
        unsigned long long sched_time; /* sched_clock time spent running */
        enum sleep_type sleep_type;

        unsigned int policy;
        cpumask_t cpus_allowed;
        unsigned int time_slice, first_time_slice;

    #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
    #endif

        struct list_head tasks;
        /*
         * ptrace_list/ptrace_children forms the list of my children
         * that were stolen by a ptracer.
         */
        struct list_head ptrace_children;
        struct list_head ptrace_list;

        struct mm_struct *mm, *active_mm;

    /* task state */
        struct linux_binfmt *binfmt;
        int exit_state;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
        /* ??? */
        unsigned int personality;
        unsigned did_exec:1;
        pid_t pid;
        pid_t tgid;

    #ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
    #endif
        /*
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with
         * p->parent->pid)
         */
        struct task_struct *real_parent; /* real parent process (when being debugged) */
        struct task_struct *parent;    /* parent process */
        /*
         * children/sibling forms the list of my children plus the
         * tasks I'm ptracing.
         */
        struct list_head children;    /* list of my children */
        struct list_head sibling;    /* linkage in my parent's children list */
        struct task_struct *group_leader;    /* threadgroup leader */

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;

        struct completion *vfork_done;        /* for vfork() */
        int __user *set_child_tid;        /* CLONE_CHILD_SETTID */
        int __user *clear_child_tid;        /* CLONE_CHILD_CLEARTID */

        unsigned int rt_priority;
        cputime_t utime, stime;
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;
    /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt;

        cputime_t it_prof_expires, it_virt_expires;
        unsigned long long it_sched_expires;
        struct list_head cpu_timers[3];

    /* process credentials */
        uid_t uid,euid,suid,fsuid;
        gid_t gid,egid,sgid,fsgid;
        struct group_info *group_info;
        kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
        unsigned keep_capabilities:1;
        struct user_struct *user;
    #ifdef CONFIG_KEYS
        struct key *request_key_auth;    /* assumed request_key authority */
        struct key *thread_keyring;    /* keyring private to this thread */
        unsigned char jit_keyring;    /* default keyring to attach requested keys to */
    #endif
        /*
         * fpu_counter contains the number of consecutive context switches
         * that the FPU is used. If this is over a threshold, the lazy fpu
         * saving becomes unlazy to save the trap. This is an unsigned char
         * so that after 256 times the counter wraps and the behavior turns
         * lazy again; this to deal with bursty apps that only use FPU for
         * a short time
         */
        unsigned char fpu_counter;
        int oomkilladj; /* OOM kill score adjustment (bit shift). */
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                         - access with [gs]et_task_comm (which lock
                           it with task_lock())
                         - initialized normally by flush_old_exec */
    /* file system info */
        int link_count, total_link_count;
    #ifdef CONFIG_SYSVIPC
    /* ipc stuff */
        struct sysv_sem sysvsem;
    #endif
    /* CPU-specific state of this task */
        struct thread_struct thread;
    /* filesystem information */
        struct fs_struct *fs;
    /* open file information */
        struct files_struct *files;
    /* namespaces */
        struct nsproxy *nsproxy;
    /* signal handlers */
        struct signal_struct *signal;
        struct sighand_struct *sighand;

        sigset_t blocked, real_blocked;
        sigset_t saved_sigmask;        /* To be restored with TIF_RESTORE_SIGMASK */
        struct sigpending pending;

        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        int (*notifier)(void *priv);
        void *notifier_data;
        sigset_t *notifier_mask;

        void *security;
        struct audit_context *audit_context;
        seccomp_t seccomp;

    /* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;
    /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
        spinlock_t alloc_lock;

        /* Protection of the PI data structures: */
        spinlock_t pi_lock;

    #ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task */
        struct plist_head pi_waiters;
        /* Deadlock detection and priority inheritance handling */
        struct rt_mutex_waiter *pi_blocked_on;
    #endif

    #ifdef CONFIG_DEBUG_MUTEXES
        /* mutex deadlock detection */
        struct mutex_waiter *blocked_on;
    #endif
    #ifdef CONFIG_TRACE_IRQFLAGS
        unsigned int irq_events;
        int hardirqs_enabled;
        unsigned long hardirq_enable_ip;
        unsigned int hardirq_enable_event;
        unsigned long hardirq_disable_ip;
        unsigned int hardirq_disable_event;
        int softirqs_enabled;
        unsigned long softirq_disable_ip;
        unsigned int softirq_disable_event;
        unsigned long softirq_enable_ip;
        unsigned int softirq_enable_event;
        int hardirq_context;
        int softirq_context;
    #endif
    #ifdef CONFIG_LOCKDEP
    # define MAX_LOCK_DEPTH 30UL
        u64 curr_chain_key;
        int lockdep_depth;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        unsigned int lockdep_recursion;
    #endif

    /* journalling filesystem info */
        void *journal_info;

    /* stacked block device info */
        struct bio *bio_list, **bio_tail;

    /* VM state */
        struct reclaim_state *reclaim_state;

        struct backing_dev_info *backing_dev_info;

        struct io_context *io_context;

        unsigned long ptrace_message;
        siginfo_t *last_siginfo; /* For ptrace use.  */
    /*
     * current io wait handle: wait queue entry to use for io waits
     * If this thread is processing aio, this points at the waitqueue
     * inside the currently handled kiocb. It may be NULL (i.e. default
     * to a stack based synchronous wait) if its doing sync IO.
     */
        wait_queue_t *io_wait;
    #ifdef CONFIG_TASK_XACCT
    /* i/o counters(bytes read/written, #syscalls */
        u64 rchar, wchar, syscr, syscw;
    #endif
        struct task_io_accounting ioac;
    #if defined(CONFIG_TASK_XACCT)
        u64 acct_rss_mem1;    /* accumulated rss usage */
        u64 acct_vm_mem1;    /* accumulated virtual memory usage */
        cputime_t acct_stimexpd;/* stime since last update */
    #endif
    #ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;
        short il_next;
    #endif
    #ifdef CONFIG_CPUSETS
        struct cpuset *cpuset;
        nodemask_t mems_allowed;
        int cpuset_mems_generation;
        int cpuset_mem_spread_rotor;
    #endif
        struct robust_list_head __user *robust_list;
    #ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
    #endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;

        atomic_t fs_excl;    /* holding fs exclusive resources */
        struct rcu_head rcu;

        /*
         * cache last used pipe for splice
         */
        struct pipe_inode_info *splice_pipe;
    #ifdef    CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
    #endif
    #ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
    #endif
    };
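      The tasks field above chains every task_struct into one global list. Below is a minimal, hypothetical kernel-module sketch that walks it, assuming a 2.6-era kernel (for_each_process() is the real kernel macro; the RCU locking shown is the style commonly used for this traversal):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/sched.h>

    /* Walk the global task list: for_each_process() follows the
     * list_head named "tasks" above, starting from init_task. */
    static int __init dump_tasks_init(void)
    {
        struct task_struct *p;

        rcu_read_lock();            /* the task list may change under us */
        for_each_process(p)
            printk(KERN_INFO "pid=%d comm=%s\n", p->pid, p->comm);
        rcu_read_unlock();
        return 0;
    }

    static void __exit dump_tasks_exit(void)
    {
    }

    module_init(dump_tasks_init);
    module_exit(dump_tasks_exit);
    MODULE_LICENSE("GPL");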

    The chain from kernel stack to address space: thread_info -> task_struct -> struct mm_struct

    struct mm_struct {
        struct vm_area_struct * mmap;        /* list of VMAs */
        struct rb_root mm_rb;
        struct vm_area_struct * mmap_cache;    /* last find_vma result */
        unsigned long (*get_unmapped_area) (struct file *filp,
                    unsigned long addr, unsigned long len,
                    unsigned long pgoff, unsigned long flags);
        void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
        unsigned long mmap_base;        /* base of mmap area */
        unsigned long task_size;        /* size of task vm space */
        unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */
        unsigned long free_area_cache;        /* first hole of size cached_hole_size or larger */
        pgd_t * pgd;
        atomic_t mm_users;            /* How many users with user space? */
        atomic_t mm_count;            /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                /* number of VMAs */
        struct rw_semaphore mmap_sem;
        spinlock_t page_table_lock;        /* Protects page tables and some counters */

        struct list_head mmlist;        /* List of maybe swapped mm's.  These are globally strung   // the list linking all mm_structs
                             * together off init_mm.mmlist, and are protected
                             * by mmlist_lock
                             */

        /* Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        mm_counter_t _file_rss;
        mm_counter_t _anon_rss;

        unsigned long hiwater_rss;    /* High-watermark of RSS usage */
        unsigned long hiwater_vm;    /* High-water virtual memory usage */

        unsigned long total_vm, locked_vm, shared_vm, exec_vm;
        unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
        unsigned long start_code, end_code, start_data, end_data;   // start/end addresses of the code and data segments
        unsigned long start_brk, brk, start_stack;                  // start of the heap, current end of the heap, start of the stack
        unsigned long arg_start, arg_end, env_start, env_end;       // start/end of the command-line arguments and of the environment variables

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        cpumask_t cpu_vm_mask;

        /* Architecture-specific MM context */
        mm_context_t context;

        /* Swap token stuff */
        /*
         * Last value of global fault stamp as seen by this process.
         * In other words, this value gives an indication of how long
         * it has been since this task got the token.
         * Look at mm/thrash.c
         */
        unsigned int faultstamp;
        unsigned int token_priority;
        unsigned int last_interval;

        unsigned char dumpable:2;

        /* coredumping support */
        int core_waiters;
        struct completion *core_startup_done, core_done;

        /* aio bits */
        rwlock_t        ioctx_list_lock;
        struct kioctx        *ioctx_list;
    };
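      The mmap field above heads the list of vm_area_structs that make up an address space. Below is a kernel-side sketch, assuming 2.6-style locking, that prints every region of a process, much as /proc/<pid>/maps does:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Walk every vm_area_struct of an address space and print its
     * range; mmap_sem must be held for reading during the walk. */
    static void print_vmas(struct mm_struct *mm)
    {
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
            printk(KERN_INFO "%08lx-%08lx flags=%lx\n",
                   vma->vm_start, vma->vm_end, vma->vm_flags);
        up_read(&mm->mmap_sem);
    }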

       As the mm_struct above shows, a process's memory areas can contain various kinds of memory objects, chiefly:

      1. A memory mapping of the executable file's code, called the code (text) segment.

      2. A memory mapping of the executable file's initialized global variables, called the data segment.

      3. A zero-page memory mapping containing the uninitialized global variables, that is, the bss segment.

      4. A zero-page memory mapping for the process's user-space stack. This must not be confused with the process kernel stack: the kernel stack exists separately and is maintained by the kernel, since the kernel manages all processes. As described earlier, the kernel stack holds the per-process management information through which the kernel controls the process.

      5. The code segment, data segment and bss of each shared library, such as the C library or the dynamic linker, which are also loaded into the process's address space.

      6. Any memory-mapped files.

      7. Any shared memory segments.

      8. Any anonymous memory mappings, such as memory allocated by malloc. (A quick way to inspect all of these regions follows this list.)
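      The quick way referred to in item 8 is /proc/<pid>/maps, where each of these objects appears as one or more lines. A minimal user-space program that dumps the calling process's own map:

    #include <stdio.h>

    /* Dump this process's memory map: one line per VMA, covering
     * text, data, heap, stack, shared libraries, and any file-backed
     * or anonymous mappings. */
    int main(void)
    {
        FILE *f = fopen("/proc/self/maps", "r");
        char line[512];

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }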

      After all of the above, the process address space can be understood as the memory a program occupies while running on the operating system. In《程序员的自我修养》(roughly, "The Self-Cultivation of Programmers") the authors put it this way: a program is to a process what a recipe is to cooking. The recipe is the program, the cook is the CPU, and the act of cooking is the process. The code we write, with its global and static variables and its requested memory, sits on disk while it is not running; once it runs it becomes a process, and a new process space is created for it, which is a block of memory in RAM (memory the CPU can execute from directly), not disk space.

      On Linux and UNIX systems, the process space of a process (a piece of running code) is typically arranged as follows:
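      (high addresses at the top, low addresses at the bottom)

      high address  +---------------------------------+
                    | command-line arguments,         |
                    | environment variables           |
                    +---------------------------------+
                    |       stack (grows down)        |
                    |               ...               |
                    |       heap  (grows up)          |
                    +---------------------------------+
                    |    uninitialized data (bss)     |  zeroed by exec
                    +---------------------------------+
                    |        initialized data         |  read from program file
                    +---------------------------------+
                    |          text (code)            |  read from program file
      low address   +---------------------------------+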

      

      In effect, everything described so far amounts to this: the kernel uses the per-process bookkeeping on the kernel stack to manage how the process executes inside this address space, and the layout above is exactly the storage layout of a C program.

      Text segment: the machine instructions executed by the CPU. The text segment is usually read-only, to prevent a program from accidentally modifying its own instructions.

      Initialized data segment: usually called simply the data segment, it holds the variables to which the program explicitly assigns initial values. A declaration outside any function in a C program, such as int a = 9;, causes the variable to be stored, with its initial value, in the initialized data segment.

      Uninitialized data segment: usually called the bss segment. Before the program starts executing, the kernel initializes the data in this segment to zero or to null pointers. A declaration outside any function such as long sum[1000]; causes the variable to be stored in the uninitialized data segment.

      Stack: the stack is essential; our function calls depend on it to save their data. Automatic variables, together with the information that must be saved on each function call, are stored here. On each call, the return address and the caller's environment (such as the values of certain machine registers) are pushed onto the stack; the newly called function then allocates room on the stack for its automatic and temporary variables. Using the stack this way is what lets recursive C functions work: each time a function calls itself, a fresh stack frame is used, so the variables of one invocation do not interfere with those of another.

      Heap: dynamic storage allocation usually takes place on the heap. By historical convention, the heap sits between the uninitialized data (bss) segment and the stack.
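      To make this layout concrete, here is a small user-space sketch that prints one address from each region; the exact addresses vary with the system, the compiler, and ASLR:

    #include <stdio.h>
    #include <stdlib.h>

    int init_global = 9;      /* initialized data segment */
    long bss_global[1000];    /* uninitialized data (bss) segment */

    int main(void)
    {
        int local = 0;                    /* stack */
        void *dyn = malloc(16);           /* heap */

        printf("text : %p\n", (void *)main);
        printf("data : %p\n", (void *)&init_global);
        printf("bss  : %p\n", (void *)bss_global);
        printf("heap : %p\n", dyn);
        printf("stack: %p\n", (void *)&local);

        free(dyn);
        return 0;
    }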

      In C we frequently request dynamic storage with functions such as malloc(), calloc() and realloc():

      #include <stdlib.h>

      void *malloc(size_t size);

      void *calloc(size_t nmemb, size_t size);

      void *realloc(void *ptr, size_t size);

      void free(void *ptr);

      The pointers returned by the three allocation functions are guaranteed to be suitably aligned, so they can be used for any data object. malloc allocates the specified number of bytes; the initial contents of the storage are indeterminate.

      These allocation functions come to us as library functions; the libraries we commonly use are the C standard library, on Linux usually glibc, and they already implement memory allocation. malloc, for instance, is a C library function implemented on top of the brk() and mmap() system calls. As discussed earlier, all memory allocation is ultimately managed by the kernel: when we call an allocation function from glibc, a system call is made, and the kernel carves the dynamic memory out of the heap of the process address space it manages. Concretely, malloc() calls brk(), brk() traps into the kernel via a system call, and the kernel executes the sys_brk(addr) function to carry out the allocation.
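      A user-space sketch of the mechanism underneath, using sbrk() to move the program break directly; note that glibc's malloc may also serve large requests with mmap() rather than brk():

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        void *before = sbrk(0);      /* current program break (end of heap) */
        void *block  = sbrk(4096);   /* grow the heap by one page */
        void *after  = sbrk(0);

        printf("break before: %p\n", before);
        printf("block at    : %p\n", block);
        printf("break after : %p\n", after);

        sbrk(-4096);                 /* shrink the heap back */
        return 0;
    }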

      One potentially confusing point: besides malloc, memory is also allocated when fork() creates a new process or thread, and again as the process runs; both kinds of allocation are performed by the kernel, but they differ in purpose. The allocations triggered by fork() set up what every process needs: the kernel stack, memory to hold the page tables, and the memory in which the code will execute, that is, the process memory space. In short, fork()-time allocation provides (1) storage for the kernel's management information and (2) the process storage space sketched above, where the code and data live at run time. Memory allocated with malloc(), by contrast, is a region handed out from the heap inside the address space already set up for the process, used to store data. Both go through the kernel, yet their uses are very different.
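      A short user-space sketch showing that fork() gives the child its own address space: after the fork, a write in the child does not affect the parent's copy of the variable:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        int value = 1;
        pid_t pid = fork();   /* child gets a copy-on-write duplicate of the address space */

        if (pid == 0) {
            value = 42;       /* modifies only the child's copy */
            printf("child : value=%d\n", value);
            return 0;
        }
        wait(NULL);
        printf("parent: value=%d\n", value);   /* still 1 */
        return 0;
    }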

      The following describes the flow of allocating dynamic memory from the heap of the process space.

        The C library function malloc() calls the unix/linux system call brk(); brk() traps into the kernel, which executes sys_brk(); sys_brk() then calls do_brk() to perform the allocation:

               malloc() ----> brk() ------+------> sys_brk() ----> do_brk() ----> vma_merge() / kmem_cache_zalloc()
                      user space          |                   kernel space
                                          +--- (the system call crosses this boundary)

        The sys_brk() function:

    asmlinkage unsigned long sys_brk(unsigned long brk)
    {
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;

        down_write(&mm->mmap_sem);

        if (brk < mm->end_code)
            goto out;

        /*
         * Check against rlimit here. If this check is done later after the test
         * of oldbrk with newbrk then it can escape the test and let the data
         * segment grow beyond its set limit the in case where the limit is
         * not page aligned -Ram Gupta
         */
        rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
        if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
            goto out;

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
            goto set_brk;

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
            if (!do_munmap(mm, newbrk, oldbrk-newbrk))
                goto set_brk;
            goto out;
        }

        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
            goto out;

        /* Ok, looks good - let it rip. */
        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
            goto out;
    set_brk:
        mm->brk = brk;
    out:
        retval = mm->brk;
        up_write(&mm->mmap_sem);
        return retval;
    }
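        sys_brk() works in whole pages: PAGE_ALIGN rounds an address up to the next page boundary. A sketch of its usual definition together with a worked example, assuming 4 KB pages:

    #define PAGE_SIZE  4096UL
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(addr)  (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

    /* Example: a requested break of 0x0804a123 rounds up to
     * 0x0804b000, so the heap only grows or shrinks by whole pages;
     * sys_brk() compares the old and new aligned breaks to decide
     * whether anything needs to be mapped or unmapped at all. */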

        The do_brk() function:

        

    unsigned long do_brk(unsigned long addr, unsigned long len)
    {
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
        unsigned long flags;
        struct rb_node ** rb_link, * rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;

        len = PAGE_ALIGN(len);
        if (!len)
            return addr;

        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
            return -EINVAL;

        if (is_hugepage_only_range(mm, addr, len))
            return -EINVAL;

        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

        error = arch_mmap_check(addr, len, flags);
        if (error)
            return error;

        /*
         * mlock MCL_FUTURE?
         */
        if (mm->def_flags & VM_LOCKED) {
            unsigned long locked, lock_limit;
            locked = len >> PAGE_SHIFT;
            locked += mm->locked_vm;
            lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
            lock_limit >>= PAGE_SHIFT;
            if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                return -EAGAIN;
        }

        /*
         * mm->mmap_sem is required to protect against another thread
         * changing the mappings in case we sleep.
         */
        verify_mm_writelocked(mm);

        /*
         * Clear old maps.  this also does some error checking for us
         */
     munmap_back:
        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
        if (vma && vma->vm_start < addr + len) {
            if (do_munmap(mm, addr, len))
                return -ENOMEM;
            goto munmap_back;
        }

        /* Check against address space limits *after* clearing old maps... */
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
            return -ENOMEM;

        if (mm->map_count > sysctl_max_map_count)
            return -ENOMEM;

        if (security_vm_enough_memory(len >> PAGE_SHIFT))
            return -ENOMEM;

        /* Can we just expand an old private anonymous mapping? */
        if (vma_merge(mm, prev, addr, addr + len, flags,
                        NULL, NULL, pgoff, NULL))
            goto out;

        /*
         * create a vma struct for an anonymous mapping
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
            vm_unacct_memory(len >> PAGE_SHIFT);
            return -ENOMEM;
        }

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
        vma->vm_page_prot = protection_map[flags &
                    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        vma_link(mm, vma, prev, rb_link, rb_parent);
    out:
        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
            mm->locked_vm += len >> PAGE_SHIFT;
            make_pages_present(addr, addr + len);
        }
        return addr;
    }

        The vma_merge() function:

    struct vm_area_struct *vma_merge(struct mm_struct *mm,
                struct vm_area_struct *prev, unsigned long addr,
                unsigned long end, unsigned long vm_flags,
                struct anon_vma *anon_vma, struct file *file,
                pgoff_t pgoff, struct mempolicy *policy)
    {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;

        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
         */
        if (vm_flags & VM_SPECIAL)
            return NULL;

        if (prev)
            next = prev->vm_next;
        else
            next = mm->mmap;
        area = next;
        if (next && next->vm_end == end)        /* cases 6, 7, 8 */
            next = next->vm_next;

        /*
         * Can it merge with the predecessor?
         */
        if (prev && prev->vm_end == addr &&
                mpol_equal(vma_policy(prev), policy) &&
                can_vma_merge_after(prev, vm_flags,
                            anon_vma, file, pgoff)) {
            /*
             * OK, it can.  Can we now merge in the successor as well?
             */
            if (next && end == next->vm_start &&
                    mpol_equal(policy, vma_policy(next)) &&
                    can_vma_merge_before(next, vm_flags,
                        anon_vma, file, pgoff+pglen) &&
                    is_mergeable_anon_vma(prev->anon_vma,
                                  next->anon_vma)) {
                                /* cases 1, 6 */
                vma_adjust(prev, prev->vm_start,
                    next->vm_end, prev->vm_pgoff, NULL);
            } else                    /* cases 2, 5, 7 */
                vma_adjust(prev, prev->vm_start,
                    end, prev->vm_pgoff, NULL);
            return prev;
        }

        /*
         * Can this new request be merged in front of next?
         */
        if (next && end == next->vm_start &&
                mpol_equal(policy, vma_policy(next)) &&
                can_vma_merge_before(next, vm_flags,
                        anon_vma, file, pgoff+pglen)) {
            if (prev && addr < prev->vm_end)    /* case 4 */
                vma_adjust(prev, prev->vm_start,
                    addr, prev->vm_pgoff, NULL);
            else                    /* cases 3, 8 */
                vma_adjust(area, addr, next->vm_end,
                    next->vm_pgoff - pglen, NULL);
            return area;
        }

        return NULL;
    }
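        For do_brk()'s anonymous range [addr, addr+len), the outcomes distinguished by the code reduce to three, roughly:

    merge with prev only :  [ prev ][ new ]           ->  [ prev + new ]
    merge with next only :           [ new ][ next ]  ->  [ new + next ]
    merge with both      :  [ prev ][ new ][ next ]   ->  [ prev + new + next ]

        If no compatible neighbour exists, vma_merge() returns NULL and do_brk() falls back to allocating a fresh vm_area_struct from the slab cache with kmem_cache_zalloc().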

        The kmem_cache_zalloc() function:

    void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
    {
        void *ret = kmem_cache_alloc(c, flags);
        if (ret)
            memset(ret, 0, c->size);

        return ret;
    }
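        Usage follows the standard slab pattern: create a cache for a fixed-size object type, then take zero-filled objects from it, just as do_brk() does with vm_area_cachep. A hypothetical sketch; the kmem_cache_create() signature shown is the 2.6-era six-argument one and has changed in later kernels:

    #include <linux/slab.h>

    struct widget {
        int id;
        char name[16];
    };

    static struct kmem_cache *widget_cachep;

    static int widget_setup(void)
    {
        /* One slab cache per object type; the kernel keeps a cache
         * exactly like this (vm_area_cachep) for vm_area_struct. */
        widget_cachep = kmem_cache_create("widget_cache",
                                          sizeof(struct widget),
                                          0, SLAB_HWCACHE_ALIGN,
                                          NULL, NULL);
        if (!widget_cachep)
            return -ENOMEM;
        return 0;
    }

    static void widget_demo(void)
    {
        /* Comes back zero-filled, like the vma in do_brk(). */
        struct widget *w = kmem_cache_zalloc(widget_cachep, GFP_KERNEL);

        if (w)
            kmem_cache_free(widget_cachep, w);
    }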

      

      
