  • Linux physical memory description

    Linux runs on a wide range of architectures, so it needs an architecture-independent way to describe memory. Linux uses the VM (virtual memory subsystem) to describe and manage memory. A central concept in the VM is NUMA (Non-Uniform Memory Access). On large machines, memory is grouped into many banks, and accessing a bank has a different cost depending on its "distance" from the processor.

    Each bank is treated as a node (pg_data_t), and each node is divided into blocks called zones, each representing a range of memory. Besides ZONE_DMA, ZONE_NORMAL and ZONE_HIGHMEM, the 2.6 series (including the 2.6.32 source quoted here) also provides ZONE_MOVABLE, which helps the kernel satisfy allocations of large contiguous blocks.

    Each physical page frame is described by a page structure, and all of these structures live in a global mem_map array (in the flat, non-sparse memory model). The array is usually placed at the start of ZONE_NORMAL or, on low-memory systems, just after the region reserved for the kernel image.

    Nodes

    Each node of memory is described by a pg_data_t. When allocating a page, Linux applies a node-local allocation policy: it allocates from the node closest to the CPU that is currently running. Since a process tends to keep running on the same CPU, memory obtained from the current node is very likely to be used locally.
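    The same node-local preference is visible from userspace through libnuma. As a hedged illustration (this is not kernel code; it assumes libnuma and a NUMA-capable machine, and builds with gcc demo.c -lnuma):

    /* Userspace sketch of node-local allocation via libnuma. It only
     * illustrates the "allocate from the nearest node" policy that the
     * kernel applies by default. */
    #define _GNU_SOURCE
    #include <numa.h>
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            if (numa_available() < 0) {
                    fprintf(stderr, "no NUMA support on this system\n");
                    return 1;
            }
            /* Allocate 1 MiB from the node of the CPU we are running on,
             * mirroring the kernel's node-local default policy. */
            void *buf = numa_alloc_local(1 << 20);
            if (buf) {
                    printf("allocated on node %d\n",
                           numa_node_of_cpu(sched_getcpu()));
                    numa_free(buf, 1 << 20);
            }
            return 0;
    }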

    /*
     * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
     * (mostly NUMA machines?) to denote a higher-level memory zone than the
     * zone denotes.
     *
     * On NUMA machines, each NUMA node would have a pg_data_t to describe
     * it's memory layout.
     *
     * Memory statistics and page replacement data structures are maintained on a
     * per-zone basis.
     */
    struct bootmem_data;
    typedef struct pglist_data {
    	 /* Memory zones of this node; the possible types are given by zone_type. */
    	struct zone node_zones[MAX_NR_ZONES];
    	 /* Fallback zone lists for this node: when the node has no free memory, allocations fall back to these zones. */
    	struct zonelist node_zonelists[MAX_ZONELISTS];
    	  /* Number of usable zones, i.e. one past the index of the last valid entry in node_zones. */
    	int nr_zones;
    #ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
    	 /* In the flat memory model, points to the descriptor of this node's first page frame. */
    	struct page *node_mem_map;
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR
    	/* Used by the cgroup memory resource controller. */
    	struct page_cgroup *node_page_cgroup;
    #endif
    #endif
      /**
              * Memory management is needed even during the boot stage,
              * before the memory subsystem proper is initialized. This
              * structure serves that boot-time (bootmem) allocator.
              */
    	struct bootmem_data *bdata;
    #ifdef CONFIG_MEMORY_HOTPLUG
    	/*
    	 * Must be held any time you expect node_start_pfn, node_present_pages
    	 * or node_spanned_pages stay constant.  Holding this will also
    	 * guarantee that any pfn_valid() stays that way.
    	 *
    	 * Nests above zone->lock and zone->size_seqlock.
    	 */
    	    /* When memory hotplug is supported, protects the node-size
              	fields of this structure. Take this lock in code that uses
              	node_start_pfn, node_present_pages or node_spanned_pages.
              */
    	spinlock_t node_size_lock;
    #endif
    	/* First page frame number of this node, i.e. its offset
    	within the global mem_map. */
    	unsigned long node_start_pfn;
    	unsigned long node_present_pages; /* total number of physical pages */
    	unsigned long node_spanned_pages; /* total size of physical page range, including holes */
    	/* Node ID. */
    	int node_id;
    	/* Wait queue of this node's swap daemon, used when page frames of the node are swapped out. */
    	wait_queue_head_t kswapd_wait;
    	/* The swap daemon (kswapd) responsible for this node. */
    	struct task_struct *kswapd;
    	/* Used by the page-out subsystem: the order of the area to free. */
    	int kswapd_max_order;
    } pg_data_t;
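
    To make the node/zone hierarchy concrete, here is a minimal self-contained model in plain C. The mini_* types and the page counts are invented for illustration; they only mirror the shape of pg_data_t and the way the kernel walks pgdat->node_zones:

    /* Simplified model of the node -> zone hierarchy. The structs and
     * numbers are illustrative, not the kernel's definitions. */
    #include <stdio.h>

    #define MAX_NR_ZONES 4

    struct mini_zone {
            const char *name;
            unsigned long present_pages;
    };

    struct mini_pglist_data {
            struct mini_zone node_zones[MAX_NR_ZONES];
            int nr_zones;
            unsigned long node_start_pfn;
            int node_id;
    };

    int main(void)
    {
            struct mini_pglist_data node0 = {
                    .node_zones = {
                            { "DMA",    4096   },  /* 16 MiB of 4 KiB pages */
                            { "Normal", 225280 },  /* ~880 MiB */
                    },
                    .nr_zones = 2,
                    .node_start_pfn = 0,
                    .node_id = 0,
            };

            /* Walk the zones of one node, the way the kernel iterates
             * pgdat->node_zones[0 .. nr_zones). */
            for (int i = 0; i < node0.nr_zones; i++)
                    printf("node %d zone %-6s: %lu pages\n",
                           node0.node_id, node0.node_zones[i].name,
                           node0.node_zones[i].present_pages);
            return 0;
    }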


    Zones

    Each zone is described by a struct zone. The zone types are defined as follows:

    enum zone_type {
    #ifdef CONFIG_ZONE_DMA
    	/*
    	 * ZONE_DMA is used when there are devices that are not able
    	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
    	 * carve out the portion of memory that is needed for these devices.
    	 * The range is arch specific.
    	 *
    	 * Some examples
    	 *
    	 * Architecture		Limit
    	 * ---------------------------
    	 * parisc, ia64, sparc	<4G
    	 * s390			<2G
    	 * arm			Various
    	 * alpha		Unlimited or 0-16MB.
    	 *
    	 * i386, x86_64 and multiple other arches
    	 * 			<16M.
    	 */
    	ZONE_DMA,
    #endif
    #ifdef CONFIG_ZONE_DMA32
    	/*
    	 * x86_64 needs two ZONE_DMAs because it supports devices that are
    	 * only able to do DMA to the lower 16M but also 32 bit devices that
    	 * can only do DMA areas below 4G.
    	 */
    	ZONE_DMA32,
    #endif
    	/*
    	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
    	 * performed on pages in ZONE_NORMAL if the DMA devices support
    	 * transfers to all addressable memory.
    	 */
    	ZONE_NORMAL,
    #ifdef CONFIG_HIGHMEM
    	/*
    	 * A memory area that is only addressable by the kernel through
    	 * mapping portions into its own address space. This is for example
    	 * used by i386 to allow the kernel to address the memory beyond
    	 * 900MB. The kernel will set up special mappings (page
    	 * table entries on i386) for each page that the kernel needs to
    	 * access.
    	 */
    	ZONE_HIGHMEM,
    #endif
    	/*
              This is a pseudo zone: to keep physical memory from fragmenting,
              the physical pages backing a virtual address can be migrated.
              */
    	ZONE_MOVABLE,
    	__MAX_NR_ZONES
    };
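
    As a rough illustration of what these boundaries mean, the sketch below classifies a physical address using the classic 32-bit x86 cutoffs (16 MiB for DMA, 896 MiB for directly mapped lowmem). As the comment above stresses, the limits are architecture-specific; these numbers are only the i386 convention:

    /* Illustrative zone classification using i386-style boundaries. */
    #include <stdio.h>

    #define DMA_LIMIT    (16UL << 20)   /* ISA DMA reach: 16 MiB */
    #define LOWMEM_LIMIT (896UL << 20)  /* directly mapped lowmem */

    static const char *zone_of(unsigned long phys)
    {
            if (phys < DMA_LIMIT)
                    return "ZONE_DMA";
            if (phys < LOWMEM_LIMIT)
                    return "ZONE_NORMAL";
            return "ZONE_HIGHMEM";
    }

    int main(void)
    {
            unsigned long addrs[] = { 1UL << 20, 64UL << 20, 1UL << 30 };
            for (int i = 0; i < 3; i++)
                    printf("0x%08lx -> %s\n", addrs[i], zone_of(addrs[i]));
            return 0;
    }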


     

    The English comments above are already quite detailed.

    A zone tracks information such as page-usage statistics, free-area information and locks.
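
    Before the struct itself, here is a minimal sketch of how the watermark and lowmem_reserve fields defined below are meant to be combined. It loosely models the logic of the kernel's zone_watermark_ok(); the mini_zone type and all numbers are made up for illustration:

    /* Simplified watermark check: an allocation from a zone succeeds
     * only if enough free pages remain above the watermark plus the
     * lowmem_reserve owed to allocations falling back from higher
     * zones. Loosely modeled on the kernel's zone_watermark_ok(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct mini_zone {
            unsigned long free_pages;
            unsigned long watermark_min;
            unsigned long lowmem_reserve;  /* held back for fallbacks */
    };

    static bool watermark_ok(const struct mini_zone *z, unsigned int order)
    {
            unsigned long want = 1UL << order;  /* 2^order pages requested */
            if (z->free_pages < want)
                    return false;
            return z->free_pages - want >= z->watermark_min + z->lowmem_reserve;
    }

    int main(void)
    {
            struct mini_zone dma = { .free_pages = 1024,
                                     .watermark_min = 128,
                                     .lowmem_reserve = 512 };
            printf("order-0 alloc from DMA: %s\n",
                   watermark_ok(&dma, 0) ? "ok" : "would breach watermark");
            return 0;
    }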

    struct zone {
    	/* Fields commonly accessed by the page allocator */
    
    	/* zone watermarks, access with *_wmark_pages(zone) macros */
    	/* The zone's three watermarks: high (memory is plentiful), low, and min. */
    	unsigned long watermark[NR_WMARK];
    
    	/*
    	 * We don't know if the memory that we're going to allocate will be freeable
    	 * or/and it will be released eventually, so to avoid totally wasting several
    	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
    	 * to run OOM on the lower zones despite there's tons of freeable ram
    	 * on the higher zones). This array is recalculated at runtime if the
    	 * sysctl_lowmem_reserve_ratio sysctl changes.
    	 */
    	  /**
              * When the highmem or normal zone cannot satisfy an allocation,
              * it falls back to the normal or DMA zone. To keep lower zones
              * (DMA in particular) from being exhausted, some of their memory
              * is held in reserve for drivers; this array gives how much must
              * remain in a zone when an allocation falls back into it from a
              * higher zone.
              */
    	unsigned long		lowmem_reserve[MAX_NR_ZONES];
    
    #ifdef CONFIG_NUMA
    	/* The NUMA node this zone belongs to. */
    	int node;
    	/*
    	 * zone reclaim becomes active if more unmapped pages exist.
    	 */
    	 /* Zone reclaim kicks in when the number of unmapped pages exceeds this value. */
    	unsigned long		min_unmapped_pages;
    	/* When the reclaimable slab pages in this zone exceed this value, slab caches are reclaimed. */
    	unsigned long		min_slab_pages;
    	/*
              * Per-CPU page caches. Single-page allocations are served from
              * this cache first, which:
              * avoids taking the global zone lock;
              * avoids handing the same page back and forth between CPUs,
              * with the resulting cache-line invalidations;
              * avoids splitting the zone's large blocks into fragments.
              */
    	struct per_cpu_pageset	*pageset[NR_CPUS];
    #else
    	struct per_cpu_pageset	pageset[NR_CPUS];
    #endif
    	/*
    	 * free areas of different sizes
    	 */
    	 /* Protects the buddy-system data structures, i.e. the free_area fields. */
    	spinlock_t		lock;
    #ifdef CONFIG_MEMORY_HOTPLUG
    	/* see spanned/present_pages for more description */
    	/* Protects spanned/present_pages and friends. These almost never
               change (only on memory hotplug), are not protected by the 'lock'
               field, and are mostly read, hence the seqlock. */
    	seqlock_t		span_seqlock;
    #endif
    	/* Core of the buddy system: MAX_ORDER (11) free lists, where list n holds free blocks of 2^n pages. */
    	struct free_area	free_area[MAX_ORDER];
    
    #ifndef CONFIG_SPARSEMEM
    	/*
    	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
    	 * In SPARSEMEM, this map is stored in struct mem_section
    	 */
    	 /* Flags array for this zone's pageblocks. */
    	unsigned long		*pageblock_flags;
    #endif /* CONFIG_SPARSEMEM */
    
    	/* Unused padding, ensuring the following fields are cache-line aligned. */
    	ZONE_PADDING(_pad1_)
    
    	/* Fields commonly accessed by the page reclaim scanner */
     	/*
              * The lru fields are used by page reclaim; this lock protects
              * them. The LRU lists track which pages are active and which are
              * not, and thereby which should be written back to disk to free
              * memory.
              */
    	spinlock_t		lru_lock;	
    	/* Heads of the LRU lists: active/inactive anonymous pages, active/inactive file-backed pages. */
    	struct zone_lru {
    		struct list_head list;
    	} lru[NR_LRU_LISTS];
    	/* Page reclaim statistics. */
    	struct zone_reclaim_stat reclaim_stat;
    	/* Pages scanned since the last reclaim. */
    	unsigned long		pages_scanned;	   /* since last reclaim */
    	unsigned long		flags;		   /* zone flags, see below */
    
    	/* Zone statistics */
    	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
    
    	/*
    	 * prev_priority holds the scanning priority for this zone.  It is
    	 * defined as the scanning priority at which we achieved our reclaim
    	 * target at the previous try_to_free_pages() or balance_pgdat()
    	 * invokation.
    	 *
    	 * We use prev_priority as a measure of how much stress page reclaim is
    	 * under - it drives the swappiness decision: whether to unmap mapped
    	 * pages.
    	 *
    	 * Access to both this field is quite racy even on uniprocessor.  But
    	 * it is expected to average out OK.
    	 */
    	int prev_priority;
    
    	/*
    	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
    	 * this zone's LRU.  Maintained by the pageout code.
    	 */
    	unsigned int inactive_ratio;
    
    	/* Padding for cache-line alignment. */
    	ZONE_PADDING(_pad2_)
    	/* Rarely used or read-mostly fields */
    
    	/*
    	 * wait_table		-- the array holding the hash table
    	 * wait_table_hash_nr_entries	-- the size of the hash table array
    	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
    	 *
    	 * The purpose of all these is to keep track of the people
    	 * waiting for a page to become available and make them
    	 * runnable again when possible. The trouble is that this
    	 * consumes a lot of space, especially when so few things
    	 * wait on pages at a given time. So instead of using
    	 * per-page waitqueues, we use a waitqueue hash table.
    	 *
    	 * The bucket discipline is to sleep on the same queue when
    	 * colliding and wake all in that wait queue when removing.
    	 * When something wakes, it must check to be sure its page is
    	 * truly available, a la thundering herd. The cost of a
    	 * collision is great, but given the expected load of the
    	 * table, they should be so rare as to be outweighed by the
    	 * benefits from the saved space.
    	 *
    	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
    	 * primary users of these fields, and in mm/page_alloc.c
    	 * free_area_init_core() performs the initialization of them.
    	 */
    	wait_queue_head_t	* wait_table;
    	unsigned long		wait_table_hash_nr_entries;
    	unsigned long		wait_table_bits;
    
    	/*
    	 * Discontig memory support fields.
    	 */
    	 /* The node this zone belongs to. */
    	struct pglist_data	*zone_pgdat;
    	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
    	/* Offset of this zone's first page frame within mem_map. */
    	unsigned long		zone_start_pfn;
    
    	/*
    	 * zone_start_pfn, spanned_pages and present_pages are all
    	 * protected by span_seqlock.  It is a seqlock because it has
    	 * to be read outside of zone->lock, and it is done in the main
    	 * allocator path.  But, it is written quite infrequently.
    	 *
    	 * The lock is declared along with zone->lock because it is
    	 * frequently read in proximity to zone->lock.  It's good to
    	 * give them a chance of being in the same cacheline.
    	 */
    	unsigned long		spanned_pages;	/* total size, including holes */
    	unsigned long		present_pages;	/* amount of memory (excluding holes) */
    
    	/*
    	 * rarely used fields:
    	 */
    	const char		*name;
    } ____cacheline_internodealigned_in_smp;


    Where nothing is noted, the kernel's own English comments are clear enough.
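
    The free_area array above is the heart of the buddy allocator: list n chains free blocks of 2^n contiguous pages. Below is a small sketch of the order rounding the buddy system performs; it is illustrative only, in the spirit of the kernel's get_order():

    /* Sketch of the buddy system's order arithmetic: a request for n
     * pages is rounded up to the next power of two, 2^order, and served
     * from free_area[order] (or a larger list that gets split). */
    #include <stdio.h>

    #define MAX_ORDER 11   /* free_area[0..10]: blocks of 1..1024 pages */

    static unsigned int order_for(unsigned long pages)
    {
            unsigned int order = 0;
            while ((1UL << order) < pages && order < MAX_ORDER - 1)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long reqs[] = { 1, 2, 3, 100, 1024 };
            for (int i = 0; i < 5; i++)
                    printf("%4lu pages -> order %u (block of %lu pages)\n",
                           reqs[i], order_for(reqs[i]),
                           1UL << order_for(reqs[i]));
            return 0;
    }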

    Pages

    Every physical page frame in the system has an associated struct page that records the state of that page.

    /*
     * Each physical page in the system has a struct page associated with
     * it to keep track of whatever it is we are using the page for at the
     * moment. Note that we have no way to track which tasks are using
     * a page, though if it is a pagecache page, rmap structures can tell us
     * who is mapping it.
     */
    struct page {
    	unsigned long flags;		/* Atomic flags, some possibly
    					 * updated asynchronously */
    	atomic_t _count;		/* Usage count, see below. */
    	union {
    		atomic_t _mapcount;	/* Count of ptes mapped in mms,
    					 * to show when page is mapped
    					 * & limit reverse map searches.
    					 */
    		struct {		/* SLUB */
    			u16 inuse;
    			u16 objects;
    		};
    	};
    	union {
    	    struct {
    		unsigned long private;		/* Mapping-private opaque data:
    					 	 * usually used for buffer_heads
    						 * if PagePrivate set; used for
    						 * swp_entry_t if PageSwapCache;
    						 * indicates order in the buddy
    						 * system if PG_buddy is set.
    						 */
    		struct address_space *mapping;	/* If low bit clear, points to
    						 * inode address_space, or NULL.
    						 * If page mapped as anonymous
    						 * memory, low bit is set, and
    						 * it points to anon_vma object:
    						 * see PAGE_MAPPING_ANON below.
    						 */
    	    };
    #if USE_SPLIT_PTLOCKS
    	    spinlock_t ptl;
    #endif
    	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
    	/* If this page belongs to a compound or buddy block and is not
    	the first page of that block, points to the first page. */
    	    struct page *first_page;	/* Compound tail pages */
    	};
    	union {	/* For a file mapping, the page's offset within the file (mapping). */
    		pgoff_t index;		/* Our offset within mapping. */
    		void *freelist;		/* SLUB: freelist req. slab lock */
    	};
    	struct list_head lru;		/* Pageout list, eg. active_list
    					 * protected by zone->lru_lock !
    					 */
    	/*
    	 * On machines where all RAM is mapped into kernel address space,
    	 * we can simply calculate the virtual address. On machines with
    	 * highmem some memory is mapped into kernel virtual memory
    	 * dynamically, so we need a place to store that address.
    	 * Note that this field could be 16 bits on x86 ... ;)
    	 *
    	 * Architectures with slow multiplication can define
    	 * WANT_PAGE_VIRTUAL in asm/page.h
    	 */
    #if defined(WANT_PAGE_VIRTUAL)
    	void *virtual;			/* Kernel virtual address (NULL if
    					   not kmapped, ie. highmem) */
    #endif /* WANT_PAGE_VIRTUAL */
    #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
    	unsigned long debug_flags;	/* Use atomic bitops on this */
    #endif
    
    #ifdef CONFIG_KMEMCHECK
    	/*
    	 * kmemcheck wants to track the status of each byte in a page; this
    	 * is a pointer to such a status block. NULL if not tracked.
    	 */
    	void *shadow;
    #endif
    };


    These core structures embody the design of Linux physical memory management. Later posts will look at the individual pieces of Linux memory management in detail.
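
    As a closing note on the mem_map array mentioned at the start: under the flat memory model, converting between a page frame number and its struct page is plain array arithmetic. The self-contained model below captures the idea (the kernel's FLATMEM pfn_to_page()/page_to_pfn() macros do essentially this, with an ARCH_PFN_OFFSET that is taken as 0 here):

    /* Model of FLATMEM pfn <-> struct page conversion: with one global
     * mem_map array, page_to_pfn is pointer subtraction and pfn_to_page
     * is array indexing. */
    #include <stdio.h>

    struct mini_page { unsigned long flags; };

    #define NR_PAGES 1024
    static struct mini_page mem_map[NR_PAGES];

    static unsigned long page_to_pfn(struct mini_page *page)
    {
            return (unsigned long)(page - mem_map);
    }

    static struct mini_page *pfn_to_page(unsigned long pfn)
    {
            return &mem_map[pfn];
    }

    int main(void)
    {
            struct mini_page *p = pfn_to_page(42);
            printf("pfn of page: %lu\n", page_to_pfn(p));  /* prints 42 */
            return 0;
    }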
