  • zoneinfo

    This note exists because, at one point, a function that used for_each_zone could not be used from a module, so I went looking at where the zone data actually lives.

    Is it defined here, at mm/bootmem.c#0027?
    0026 #ifndef CONFIG_NEED_MULTIPLE_NODES
    0027 struct pglist_data __refdata contig_page_data = {
    0028         .bdata = &bootmem_node_data[0]
    0029 };
    0030 EXPORT_SYMBOL(contig_page_data);

    Or here, at mm/bootmem.c#0027 (the variant without the bdata initializer)?
    0026 #ifndef CONFIG_NEED_MULTIPLE_NODES
    0027 struct pglist_data __refdata contig_page_data;
    0028 EXPORT_SYMBOL(contig_page_data);
    0029 #endif

    The structure itself is also declared directly:
    0860 extern struct pglist_data contig_page_data;
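
    For reference, on a kernel built without CONFIG_NEED_MULTIPLE_NODES the NODE_DATA(nid) macro in include/linux/mmzone.h expands to &contig_page_data, so a module can reach the node's pglist_data either way. A minimal sketch (module name and messages are mine, not from the original post):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/mmzone.h>

    static int __init nodedata_demo_init(void)
    {
            /* With !CONFIG_NEED_MULTIPLE_NODES, NODE_DATA(0) is just
             * &contig_page_data, so both lines print the same address. */
            printk(KERN_INFO "NODE_DATA(0)      = %p\n", NODE_DATA(0));
            printk(KERN_INFO "&contig_page_data = %p\n", &contig_page_data);
            return 0;
    }

    static void __exit nodedata_demo_exit(void)
    {
    }

    module_init(nodedata_demo_init);
    module_exit(nodedata_demo_exit);
    MODULE_LICENSE("GPL");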


    Printing from a module:
        printk( "node_data = *%p*\n", &contig_page_data );
        printk( "contig_page_data.node_zones[0] = [%s]\n",
                contig_page_data.node_zones[0].name );
        printk( "contig_page_data.node_zones[1] = [%s]\n",
                contig_page_data.node_zones[1].name );
        printk( "contig_page_data.node_zones[2] = [%s]\n",
                contig_page_data.node_zones[2].name );
        if( contig_page_data.node_zones[3].name )
                printk( "contig_page_data.node_zones[3] = [%s]\n",
                        contig_page_data.node_zones[3].name );

    Output:
    node_data = *c18a0580*
    contig_page_data.node_zones[0] = [DMA]
    contig_page_data.node_zones[1] = [Normal]
    contig_page_data.node_zones[2] = [HighMem]
    contig_page_data.node_zones[3] = [Movable]
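
    A variant that avoids hard-coding the four indices is to loop up to MAX_NR_ZONES and skip empty zones with populated_zone(), a static inline in include/linux/mmzone.h. This also sidesteps for_each_zone(): that iterator expands to first_online_pgdat()/next_zone(), which are not exported to modules on many kernel versions, and that may well be why it "could not be used" above. A sketch under those assumptions (the function name is mine; it would be called from the module's init):

    #include <linux/kernel.h>
    #include <linux/mmzone.h>

    /* Walk contig_page_data.node_zones[] by index instead of hard-coding
     * indices 0..3; populated_zone() is a static inline, so it works from
     * a module even when for_each_zone()'s helpers are not exported. */
    static void dump_zone_names(void)
    {
            int i;

            for (i = 0; i < MAX_NR_ZONES; i++) {
                    struct zone *zone = &contig_page_data.node_zones[i];

                    if (!populated_zone(zone))
                            continue;
                    printk(KERN_INFO "node_zones[%d] = [%s]\n", i, zone->name);
            }
    }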

    Recorded here to make it easier to read through the kernel's mm code.



    And the zone structure itself, from include/linux/mmzone.h:

    struct zone {
    0330         /* Fields commonly accessed by the page allocator */
    0331 
    0332         /* zone watermarks, access with *_wmark_pages(zone) macros */
    0333         unsigned long watermark[NR_WMARK];
    0334 
    0335         /*
    0336          * When free pages are below this point, additional steps are taken
    0337          * when reading the number of free pages to avoid per-cpu counter
    0338          * drift allowing watermarks to be breached
    0339          */
    0340         unsigned long percpu_drift_mark;
    0341 
    0342         /*
    0343          * We don't know if the memory that we're going to allocate will be freeable
    0344          * or/and it will be released eventually, so to avoid totally wasting several
    0345          * GB of ram we must reserve some of the lower zone memory (otherwise we risk
    0346          * to run OOM on the lower zones despite there's tons of freeable ram
    0347          * on the higher zones). This array is recalculated at runtime if the
    0348          * sysctl_lowmem_reserve_ratio sysctl changes.
    0349          */
    0350         unsigned long           lowmem_reserve[MAX_NR_ZONES];
    0351 
    0352         /*
    0353          * This is a per-zone reserve of pages that should not be
    0354          * considered dirtyable memory.
    0355          */
    0356         unsigned long           dirty_balance_reserve;
    0357 
    0358 #ifdef CONFIG_NUMA
    0359         int node;
    0360         /*
    0361          * zone reclaim becomes active if more unmapped pages exist.
    0362          */
    0363         unsigned long           min_unmapped_pages;
    0364         unsigned long           min_slab_pages;
    0365 #endif
    0366         struct per_cpu_pageset __percpu *pageset;
    0367         /*
    0368          * free areas of different sizes
    0369          */
    0370         spinlock_t              lock;
    0371         int                     all_unreclaimable; /* All pages pinned */
    0372 #ifdef CONFIG_MEMORY_HOTPLUG
    0373         /* see spanned/present_pages for more description */
    0374         seqlock_t               span_seqlock;
    0375 #endif
    0376 #ifdef CONFIG_CMA
    0377         /*
    0378          * CMA needs to increase watermark levels during the allocation
    0379          * process to make sure that the system is not starved.
    0380          */
    0381         unsigned long           min_cma_pages;
    0382 #endif
    0383         struct free_area        free_area[MAX_ORDER];
    0384 
    0385 #ifndef CONFIG_SPARSEMEM
    0386         /*
    0387          * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
    0388          * In SPARSEMEM, this map is stored in struct mem_section
    0389          */
    0390         unsigned long           *pageblock_flags;
    0391 #endif /* CONFIG_SPARSEMEM */
    0392 
    0393 #ifdef CONFIG_COMPACTION
    0394         /*
    0395          * On compaction failure, 1<<compact_defer_shift compactions
    0396          * are skipped before trying again. The number attempted since
    0397          * last failure is tracked with compact_considered.
    0398          */
    0399         unsigned int            compact_considered;
    0400         unsigned int            compact_defer_shift;
    0401         int                     compact_order_failed;
    0402 #endif
    0403 
    0404         ZONE_PADDING(_pad1_)
    0405 
    0406         /* Fields commonly accessed by the page reclaim scanner */
    0407         spinlock_t              lru_lock;
    0408         struct lruvec           lruvec;
    0409 
    0410         unsigned long           pages_scanned;     /* since last reclaim */
    0411         unsigned long           flags;             /* zone flags, see below */
    0412 
    0413         /* Zone statistics */
    0414         atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
    0415 
    0416         /*
    0417          * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
    0418          * this zone's LRU.  Maintained by the pageout code.
    0419          */
    0420         unsigned int inactive_ratio;
    0421 
    0422 
    0423         ZONE_PADDING(_pad2_)
    0424         /* Rarely used or read-mostly fields */
    0425 
    0426         /*
    0427          * wait_table           -- the array holding the hash table
    0428          * wait_table_hash_nr_entries   -- the size of the hash table array
    0429          * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
    0430          *
    0431          * The purpose of all these is to keep track of the people
    0432          * waiting for a page to become available and make them
    0433          * runnable again when possible. The trouble is that this
    0434          * consumes a lot of space, especially when so few things
    0435          * wait on pages at a given time. So instead of using
    0436          * per-page waitqueues, we use a waitqueue hash table.
    0437          *
    0438          * The bucket discipline is to sleep on the same queue when
    0439          * colliding and wake all in that wait queue when removing.
    0440          * When something wakes, it must check to be sure its page is
    0441          * truly available, a la thundering herd. The cost of a
    0442          * collision is great, but given the expected load of the
    0443          * table, they should be so rare as to be outweighed by the
    0444          * benefits from the saved space.
    0445          *
    0446          * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
    0447          * primary users of these fields, and in mm/page_alloc.c
    0448          * free_area_init_core() performs the initialization of them.
    0449          */
    0450         wait_queue_head_t       * wait_table;
    0451         unsigned long           wait_table_hash_nr_entries;
    0452         unsigned long           wait_table_bits;
    0453 
    0454         /*
    0455          * Discontig memory support fields.
    0456          */
    0457         struct pglist_data      *zone_pgdat;
    0458         /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
    0459         unsigned long           zone_start_pfn;
    0460 
    0461         /*
    0462          * zone_start_pfn, spanned_pages and present_pages are all
    0463          * protected by span_seqlock.  It is a seqlock because it has
    0464          * to be read outside of zone->lock, and it is done in the main
    0465          * allocator path.  But, it is written quite infrequently.
    0466          *
    0467          * The lock is declared along with zone->lock because it is
    0468          * frequently read in proximity to zone->lock.  It's good to
    0469          * give them a chance of being in the same cacheline.
    0470          */
    0471         unsigned long           spanned_pages;  /* total size, including holes */
    0472         unsigned long           present_pages;  /* amount of memory (excluding holes) */
    0473 
    0474         /*
    0475          * rarely used fields:
    0476          */
    0477         const char              *name;
    0478 } ____cacheline_internodealigned_in_smp;
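
    The comment at kernel line 0332 says the watermarks should be read through the *_wmark_pages(zone) accessors; these are plain macros over zone->watermark[WMARK_MIN/LOW/HIGH] in include/linux/mmzone.h, so they can be used from a module as well. A sketch that dumps them together with the free-page counter (all values are page counts; the function name is mine):

    #include <linux/kernel.h>
    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Dump the min/low/high watermarks and the free-page count of every
     * populated zone on the single node; all values are page counts. */
    static void dump_zone_watermarks(void)
    {
            int i;

            for (i = 0; i < MAX_NR_ZONES; i++) {
                    struct zone *zone = &contig_page_data.node_zones[i];

                    if (!populated_zone(zone))
                            continue;
                    printk(KERN_INFO "%-8s min=%lu low=%lu high=%lu free=%lu\n",
                           zone->name,
                           min_wmark_pages(zone),
                           low_wmark_pages(zone),
                           high_wmark_pages(zone),
                           zone_page_state(zone, NR_FREE_PAGES));
            }
    }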


    Zone start page frame numbers:
    zone_start_pfn DMA = 16
    zone_start_pfn NORMAL = 4096
    zone_start_pfn HIGH = 228350
    zone_start_pfn MOVABLE = 0
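
    Per the comment at kernel line 0458, the physical start address of a zone is zone_start_pfn << PAGE_SHIFT, so with 4 KiB pages NORMAL's pfn 4096 is 4096 << 12 = 16 MiB and DMA's pfn 16 is 64 KiB. A sketch that prints the span fields the same way (function name is mine; spanned_pages includes holes, present_pages does not):

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/mmzone.h>

    /* Print where each populated zone starts and how many page frames it
     * spans / actually contains.  paddr = zone_start_pfn << PAGE_SHIFT. */
    static void dump_zone_spans(void)
    {
            int i;

            for (i = 0; i < MAX_NR_ZONES; i++) {
                    struct zone *zone = &contig_page_data.node_zones[i];

                    if (!populated_zone(zone))
                            continue;
                    printk(KERN_INFO "%-8s start_pfn=%lu (paddr 0x%llx) spanned=%lu present=%lu\n",
                           zone->name,
                           zone->zone_start_pfn,
                           (unsigned long long)zone->zone_start_pfn << PAGE_SHIFT,
                           zone->spanned_pages,
                           zone->present_pages);
            }
    }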
     




  • Original post: https://www.cnblogs.com/kwingmei/p/3276571.html