http://blog.chinaunix.net/uid-26859697-id-4848269.html



The construction of the memory management framework was analyzed in earlier posts, but some of the content was never fully presented there; this post is mainly a supplement.

The figure below shows the layered relationship of the Linux physical memory management framework, which we have already seen.

[Figure: layered relationship of the Linux physical memory management framework]

 

Let us now focus on the purpose of each member of these management structures.



【file:/include/linux/mmzone.h】
typedef struct pglist_data {
    struct zone node_zones[MAX_NR_ZONES];
    struct zonelist node_zonelists[MAX_ZONELISTS];
    int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
    struct page *node_mem_map;
#ifdef CONFIG_MEMCG
    struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
    struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
    /*
     * Must be held any time you expect node_start_pfn, node_present_pages
     * or node_spanned_pages stay constant. Holding this will also
     * guarantee that any pfn_valid() stays that way.
     *
     * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
     * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
     *
     * Nests above zone->lock and zone->span_seqlock
     */
    spinlock_t node_size_lock;
#endif
    unsigned long node_start_pfn;
    unsigned long node_present_pages; /* total number of physical pages */
    unsigned long node_spanned_pages; /* total size of physical page
                                         range, including holes */
    int node_id;
    nodemask_t reclaim_nodes;   /* Nodes allowed to reclaim from */
    wait_queue_head_t kswapd_wait;
    wait_queue_head_t pfmemalloc_wait;
    struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
    int kswapd_max_order;
    enum zone_type classzone_idx;
#ifdef CONFIG_NUMA_BALANCING
    /* Lock serializing the migrate rate limiting window */
    spinlock_t numabalancing_migrate_lock;

    /* Rate limiting time interval */
    unsigned long numabalancing_migrate_next_window;

    /* Number of pages migrated during the rate limiting time interval */
    unsigned long numabalancing_migrate_nr_pages;
#endif
} pg_data_t;
    struct zone node_zones[MAX_NR_ZONES];

    ——Holds the zones belonging to this pg_data_t;

    struct zonelist node_zonelists[MAX_ZONELISTS];

    ——Manages the list of fallback nodes and zones; this list expresses the memory allocation policy. It strings the node_zones together, and the order in which the zones are linked is the order in which allocations are attempted: for example, with normal->dma->highmem, an allocation is first tried from the normal zone, and if that fails it falls back in turn to the dma zone and then the highmem zone (see the sketch below);
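    To make the fallback order concrete, here is a minimal userspace sketch of walking a zonelist until one zone can satisfy a request. This is a model, not kernel code; all of the names (fake_zone, alloc_from_zonelist) are illustrative.

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative model of a zonelist: zones are tried in the order
     * they appear until one of them has enough free pages. */
    struct fake_zone {
        const char *name;
        unsigned long free_pages;
    };

    static struct fake_zone *alloc_from_zonelist(struct fake_zone **zonelist,
                                                 unsigned long nr_pages)
    {
        for (size_t i = 0; zonelist[i] != NULL; i++) {
            if (zonelist[i]->free_pages >= nr_pages) {
                zonelist[i]->free_pages -= nr_pages;
                return zonelist[i];   /* first zone that can satisfy it */
            }
        }
        return NULL;                  /* every fallback failed */
    }

    int main(void)
    {
        struct fake_zone normal = { "Normal", 0 };   /* exhausted */
        struct fake_zone dma    = { "DMA", 128 };
        /* fallback order for this request: normal -> dma */
        struct fake_zone *zonelist[] = { &normal, &dma, NULL };

        struct fake_zone *z = alloc_from_zonelist(zonelist, 16);
        printf("allocated from: %s\n", z ? z->name : "(none)");
        return 0;
    }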

    int nr_zones;

    ——Records the number of zones in this node;

    struct page *node_mem_map;

    ——Points to an array of struct page, with one entry for each physical page in the node, so the whole array corresponds to all of the node's physical pages;

    struct page_cgroup *node_page_cgroup;

    ——Manages the page_cgroup structures. page_cgroup used to be a member of the struct page management structure; it has since been moved here, and all the page_cgroups are allocated up front at initialization time;

    struct bootmem_data *bdata;

    ——Points to bootmem_node_data, which can be looked up in System.map. It originally stored the information for the bootmem allocator; now that the memblock algorithm is used instead, this member is absent;

    unsigned long node_start_pfn;

    ——The page frame number of the first physical page managed by this pg_data_t;

    unsigned long node_present_pages;

    ——Records the total number of physical pages, excluding memory holes;

    unsigned long node_spanned_pages;

    ——The difference between the largest and smallest page frame numbers, i.e. the total number of physical pages spanned, including memory holes;

    int node_id;

    ——The index of this pg_data_t; on non-NUMA architectures the value is 0;

    nodemask_t reclaim_nodes;

    ——Records which memory management nodes may be reclaimed from;

    wait_queue_head_t kswapd_wait;

    ——kswapd is the page-swapout daemon thread; it blocks on this wait queue, and when the wakeup condition is met, wake_up_interruptible() is called on the queue so that kswapd can do its work (a sketch follows);
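    As a rough sketch of how the two sides meet at this wait queue (simplified and hedged; patterned after the kernel's wakeup_kswapd()/kswapd_try_to_sleep() logic, not a verbatim excerpt):

    /* Allocator side, roughly what wakeup_kswapd() does: if kswapd is
     * sleeping on pgdat->kswapd_wait, wake it up to start reclaiming. */
    if (waitqueue_active(&pgdat->kswapd_wait))
            wake_up_interruptible(&pgdat->kswapd_wait);

    /* kswapd side, simplified: sleep on the same queue until woken. */
    DEFINE_WAIT(wait);
    prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
    schedule();                     /* sleep here until woken */
    finish_wait(&pgdat->kswapd_wait, &wait);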

    wait_queue_head_t pfmemalloc_wait;

    ——Used to throttle direct memory reclaim;

    struct task_struct *kswapd;

    ——Task pointer to the kswapd daemon thread;

    int kswapd_max_order;

    ——Records the largest allocation order that kswapd should reclaim for on a given pass (set by the failed allocation that woke it);

    enum zone_type classzone_idx;

    ——This member works together with kswapd: it records the zone index up to which kswapd should balance the node;

【file:/include/linux/mmzone.h】
struct zone {
    /* Fields commonly accessed by the page allocator */

    /* zone watermarks, access with *_wmark_pages(zone) macros */
    unsigned long watermark[NR_WMARK];

    /*
     * When free pages are below this point, additional steps are taken
     * when reading the number of free pages to avoid per-cpu counter
     * drift allowing watermarks to be breached
     */
    unsigned long percpu_drift_mark;

    /*
     * We don't know if the memory that we're going to allocate will be freeable
     * or/and it will be released eventually, so to avoid totally wasting several
     * GB of ram we must reserve some of the lower zone memory (otherwise we risk
     * to run OOM on the lower zones despite there's tons of freeable ram
     * on the higher zones). This array is recalculated at runtime if the
     * sysctl_lowmem_reserve_ratio sysctl changes.
     */
    unsigned long lowmem_reserve[MAX_NR_ZONES];

    /*
     * This is a per-zone reserve of pages that should not be
     * considered dirtyable memory.
     */
    unsigned long dirty_balance_reserve;

#ifdef CONFIG_NUMA
    int node;
    /*
     * zone reclaim becomes active if more unmapped pages exist.
     */
    unsigned long min_unmapped_pages;
    unsigned long min_slab_pages;
#endif
    struct per_cpu_pageset __percpu *pageset;
    /*
     * free areas of different sizes
     */
    spinlock_t lock;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
    /* Set to true when the PG_migrate_skip bits should be cleared */
    bool compact_blockskip_flush;

    /* pfns where compaction scanners should start */
    unsigned long compact_cached_free_pfn;
    unsigned long compact_cached_migrate_pfn;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
    /* see spanned/present_pages for more description */
    seqlock_t span_seqlock;
#endif
    struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
    /*
     * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
     * In SPARSEMEM, this map is stored in struct mem_section
     */
    unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
    /*
     * On compaction failure, 1<<compact_defer_shift compactions
     * are skipped before trying again. The number attempted since
     * last failure is tracked with compact_considered.
     */
    unsigned int compact_considered;
    unsigned int compact_defer_shift;
    int compact_order_failed;
#endif

    ZONE_PADDING(_pad1_)

    /* Fields commonly accessed by the page reclaim scanner */
    spinlock_t lru_lock;
    struct lruvec lruvec;

    unsigned long pages_scanned;    /* since last reclaim */
    unsigned long flags;            /* zone flags, see below */

    /* Zone statistics */
    atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

    /*
     * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
     * this zone's LRU. Maintained by the pageout code.
     */
    unsigned int inactive_ratio;


    ZONE_PADDING(_pad2_)
    /* Rarely used or read-mostly fields */

    /*
     * wait_table -- the array holding the hash table
     * wait_table_hash_nr_entries -- the size of the hash table array
     * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
     *
     * The purpose of all these is to keep track of the people
     * waiting for a page to become available and make them
     * runnable again when possible. The trouble is that this
     * consumes a lot of space, especially when so few things
     * wait on pages at a given time. So instead of using
     * per-page waitqueues, we use a waitqueue hash table.
     *
     * The bucket discipline is to sleep on the same queue when
     * colliding and wake all in that wait queue when removing.
     * When something wakes, it must check to be sure its page is
     * truly available, a la thundering herd. The cost of a
     * collision is great, but given the expected load of the
     * table, they should be so rare as to be outweighed by the
     * benefits from the saved space.
     *
     * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
     * primary users of these fields, and in mm/page_alloc.c
     * free_area_init_core() performs the initialization of them.
     */
    wait_queue_head_t *wait_table;
    unsigned long wait_table_hash_nr_entries;
    unsigned long wait_table_bits;

    /*
     * Discontig memory support fields.
     */
    struct pglist_data *zone_pgdat;
    /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
    unsigned long zone_start_pfn;

    /*
     * spanned_pages is the total pages spanned by the zone, including
     * holes, which is calculated as:
     * spanned_pages = zone_end_pfn - zone_start_pfn;
     *
     * present_pages is physical pages existing within the zone, which
     * is calculated as:
     * present_pages = spanned_pages - absent_pages(pages in holes);
     *
     * managed_pages is present pages managed by the buddy system, which
     * is calculated as (reserved_pages includes pages allocated by the
     * bootmem allocator):
     * managed_pages = present_pages - reserved_pages;
     *
     * So present_pages may be used by memory hotplug or memory power
     * management logic to figure out unmanaged pages by checking
     * (present_pages - managed_pages). And managed_pages should be used
     * by page allocator and vm scanner to calculate all kinds of watermarks
     * and thresholds.
     *
     * Locking rules:
     *
     * zone_start_pfn and spanned_pages are protected by span_seqlock.
     * It is a seqlock because it has to be read outside of zone->lock,
     * and it is done in the main allocator path. But, it is written
     * quite infrequently.
     *
     * The span_seq lock is declared along with zone->lock because it is
     * frequently read in proximity to zone->lock. It's good to
     * give them a chance of being in the same cacheline.
     *
     * Write access to present_pages at runtime should be protected by
     * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't
     * tolerant drift of present_pages should hold memory hotplug lock to
     * get a stable value.
     *
     * Read access to managed_pages should be safe because it's unsigned
     * long. Write access to zone->managed_pages and totalram_pages are
     * protected by managed_page_count_lock at runtime. Idealy only
     * adjust_managed_page_count() should be used instead of directly
     * touching zone->managed_pages and totalram_pages.
     */
    unsigned long spanned_pages;
    unsigned long present_pages;
    unsigned long managed_pages;

    /*
     * Number of MIGRATE_RESEVE page block. To maintain for just
     * optimization. Protected by zone->lock.
     */
    int nr_migrate_reserve_block;

    /*
     * rarely used fields:
     */
    const char *name;
} ____cacheline_internodealigned_in_smp;

unsigned long watermark[NR_WMARK];

——This array holds three values: WMARK_MIN, WMARK_LOW and WMARK_HIGH; as the names suggest, min is the smallest, low sits in the middle, and high is the largest. During allocation, when free pages fall to the low watermark, the allocator wakes the kswapd daemon to reclaim physical pages in the background; when free pages fall to the min watermark, reclaim is performed synchronously in the allocation path (direct reclaim); and once kswapd has been woken, it goes back to sleep when free pages climb back to the high watermark (see the model below);
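A minimal userspace model of the three watermarks. The thresholds and the check_watermarks() helper are illustrative only; this is not the kernel's actual zone_watermark_ok() logic, just the decision rule described above.

#include <stdio.h>

enum wmark { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

/* Illustrative decision logic: wake kswapd at "low", fall back to
 * direct (synchronous) reclaim at "min", let kswapd sleep at "high". */
static void check_watermarks(unsigned long free_pages,
                             const unsigned long wmark[NR_WMARK])
{
    if (free_pages <= wmark[WMARK_MIN])
        printf("below min: direct reclaim in the allocation path\n");
    else if (free_pages <= wmark[WMARK_LOW])
        printf("below low: wake kswapd for background reclaim\n");
    else if (free_pages >= wmark[WMARK_HIGH])
        printf("at/above high: kswapd can go back to sleep\n");
    else
        printf("between low and high: nothing to do\n");
}

int main(void)
{
    const unsigned long wmark[NR_WMARK] = { 128, 256, 512 };
    check_watermarks(100, wmark);
    check_watermarks(200, wmark);
    check_watermarks(600, wmark);
    return 0;
}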

unsigned long percpu_drift_mark;

——When free pages fall below this value, extra steps are taken when reading the free-page count, to keep per-CPU counter drift from letting the preceding watermarks be breached;

unsigned long lowmem_reserve[MAX_NR_ZONES];

——Records the number of physical pages that must be kept in reserve in each zone for emergency allocations;

unsigned long dirty_balance_reserve;

——An approximate count of the free pages that the allocator will not hand out, and which therefore should not be considered dirtyable memory;

struct per_cpu_pageset __percpu *pageset;

——The pcp member of this per-CPU array implements hot/cold page management;

spinlock_t lock;

——A spinlock used to protect this zone against concurrent access;

struct free_area free_area[MAX_ORDER];

——Used mainly by the buddy memory management algorithm: one free list for each allocation order (see the sketch below);
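free_area[order] keeps one free list per power-of-two block size. The following small userspace sketch shows the buddy arithmetic the algorithm relies on (the XOR trick; simplified, not the kernel implementation, and the helper names are illustrative):

#include <stdio.h>

/* For a block of 2^order pages starting at page frame number pfn, the
 * buddy is the neighbouring block of the same size, found by flipping
 * bit 'order' of the pfn. Merging two buddies yields a block of
 * order+1 starting at the lower of the two pfns. */
static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
    return pfn ^ (1UL << order);
}

static unsigned long merged_pfn(unsigned long pfn, unsigned int order)
{
    return pfn & ~(1UL << order);
}

int main(void)
{
    unsigned long pfn = 8;      /* block of 2^1 = 2 pages at pfn 8 */
    unsigned int order = 1;

    printf("buddy of pfn %lu at order %u: %lu\n",
           pfn, order, buddy_pfn(pfn, order));        /* prints 10 */
    printf("merged block starts at %lu, order %u\n",
           merged_pfn(pfn, order), order + 1);        /* 8, order 2 */
    return 0;
}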

unsigned long *pageblock_flags;

——Related to the buddy allocator's anti-fragmentation page-migration machinery;

spinlock_t lru_lock;

——Protects the data in lruvec;

struct lruvec lruvec;

——lruvec contains a lists array used as the LRU management lists, plus a reclaim_stat member that marks page-reclaim state;

unsigned long pages_scanned;

——Records the total number of page descriptors scanned since the last reclaim of physical pages;

unsigned long flags;

——Represents the current state of this zone;

atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

——Statistics counters for the various states of this zone;

unsigned int inactive_ratio;

——The target ratio of active to inactive anonymous pages on this zone's LRU;

wait_queue_head_t *wait_table;

unsigned long wait_table_hash_nr_entries;

unsigned long wait_table_bits;

——When several processes access the same page at once, one of them necessarily gets there first, during which time the page is unavailable, so the others must block and wait. When a page is unavailable, waiters are hashed into this zone's wait_table hash table; when the page becomes available, the waiting tasks in the corresponding queue are woken. Since several pages may share the same hash value, tasks waiting on different pages can end up sleeping under the same hash bucket; when any page with that hash value becomes available, all of them are woken, and each woken task must check whether the page it was waiting for is really available. wait_table_hash_nr_entries gives the number of wait queues in the hash table, and wait_table_bits encodes the table size, with wait_table_size == (1 << wait_table_bits) (a simplified model follows);
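A simplified model of the bucket scheme: pages hash into a fixed number of wait queues, so two different pages can share a bucket and all sleepers there are woken together. The hash below is illustrative, not the kernel's page_waitqueue()/hash_ptr() implementation.

#include <stdio.h>

#define WAIT_TABLE_BITS 4   /* illustrative: 1 << 4 = 16 buckets */
#define WAIT_TABLE_SIZE (1UL << WAIT_TABLE_BITS)

/* Map a page address to a wait-table bucket: a multiplicative hash
 * keeping the top WAIT_TABLE_BITS bits is enough to show the idea. */
static unsigned long page_wait_bucket(const void *page)
{
    unsigned long v = (unsigned long)page;
    return (v * 2654435761UL) >> (sizeof(v) * 8 - WAIT_TABLE_BITS);
}

int main(void)
{
    int page_a, page_b;  /* stand-ins for two struct page addresses */
    printf("page A -> bucket %lu\n", page_wait_bucket(&page_a));
    printf("page B -> bucket %lu\n", page_wait_bucket(&page_b));
    /* If both land in the same bucket, a wake-up for one page also
     * wakes waiters of the other; each must re-check its own page. */
    return 0;
}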

struct pglist_data  *zone_pgdat;

——Points back to the pg_data_t (pglist_data) node that this zone belongs to;

unsigned long zone_start_pfn;

——Records the smallest physical page frame number in this zone;

unsigned long spanned_pages;

——Records the zone's total page count including memory holes; in effect, the difference between the zone's last and first page frame numbers;

unsigned long present_pages;

——The zone's actual number of valid pages once memory holes are excluded;

unsigned long managed_pages;

——Records the number of physical pages managed by the memory management (buddy) algorithm, i.e. excluding the pages already allocated during the initialization phase; the three counters are related as restated below;
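The relationship among the three counters, restated as runnable arithmetic (the numbers are illustrative; the formulas come straight from the comment in the struct above):

#include <stdio.h>

int main(void)
{
    unsigned long zone_start_pfn = 0x100, zone_end_pfn = 0x500;
    unsigned long absent_pages   = 0x40;  /* pages in holes */
    unsigned long reserved_pages = 0x20;  /* e.g. bootmem allocations */

    unsigned long spanned_pages = zone_end_pfn - zone_start_pfn;
    unsigned long present_pages = spanned_pages - absent_pages;
    unsigned long managed_pages = present_pages - reserved_pages;

    printf("spanned=%lu present=%lu managed=%lu\n",
           spanned_pages, present_pages, managed_pages);
    return 0;
}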

int nr_migrate_reserve_block;

——Kept purely as an optimization: records the number of MIGRATE_RESERVE pageblocks;

const char *name;

——Records the zone's name, e.g. "DMA", "Normal" or "HighMem";

【file:/include/linux/mm_types.h】
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allows us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
    /* First double word block */
    unsigned long flags;        /* Atomic flags, some possibly
                                 * updated asynchronously */
    union {
        struct address_space *mapping;  /* If low bit clear, points to
                                         * inode address_space, or NULL.
                                         * If page mapped as anonymous
                                         * memory, low bit is set, and
                                         * it points to anon_vma object:
                                         * see PAGE_MAPPING_ANON below.
                                         */
        void *s_mem;                    /* slab first object */
    };

    /* Second double word */
    struct {
        union {
            pgoff_t index;      /* Our offset within mapping. */
            void *freelist;     /* sl[aou]b first free object */
            bool pfmemalloc;    /* If set by the page allocator,
                                 * ALLOC_NO_WATERMARKS was set
                                 * and the low watermark was not
                                 * met implying that the system
                                 * is under some pressure. The
                                 * caller should try ensure
                                 * this page is only used to
                                 * free other pages.
                                 */
        };

        union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
            /* Used for cmpxchg_double in slub */
            unsigned long counters;
#else
            /*
             * Keep _count separate from slub cmpxchg_double data.
             * As the rest of the double word is protected by
             * slab_lock but _count is not.
             */
            unsigned counters;
#endif

            struct {

                union {
                    /*
                     * Count of ptes mapped in
                     * mms, to show when page is
                     * mapped & limit reverse map
                     * searches.
                     *
                     * Used also for tail pages
                     * refcounting instead of
                     * _count. Tail pages cannot
                     * be mapped and keeping the
                     * tail page _count zero at
                     * all times guarantees
                     * get_page_unless_zero() will
                     * never succeed on tail
                     * pages.
                     */
                    atomic_t _mapcount;

                    struct { /* SLUB */
                        unsigned inuse:16;
                        unsigned objects:15;
                        unsigned frozen:1;
                    };
                    int units;          /* SLOB */
                };
                atomic_t _count;        /* Usage count, see below. */
            };
            unsigned int active;        /* SLAB */
        };
    };

    /* Third double word block */
    union {
        struct list_head lru;   /* Pageout list, eg. active_list
                                 * protected by zone->lru_lock !
                                 */
        struct {                /* slub per cpu partial pages */
            struct page *next;  /* Next partial slab */
#ifdef CONFIG_64BIT
            int pages;          /* Nr of partial slabs left */
            int pobjects;       /* Approximate # of objects */
#else
            short int pages;
            short int pobjects;
#endif
        };

        struct list_head list;  /* slobs list of pages */
        struct slab *slab_page; /* slab fields */
        struct rcu_head rcu_head;       /* Used by SLAB
                                         * when destroying via RCU
                                         */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
        pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
    };

    /* Remainder is not double word aligned */
    union {
        unsigned long private;  /* Mapping-private opaque data:
                                 * usually used for buffer_heads
                                 * if PagePrivate set; used for
                                 * swp_entry_t if PageSwapCache;
                                 * indicates order in the buddy
                                 * system if PG_buddy is set.
                                 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
        spinlock_t *ptl;
#else
        spinlock_t ptl;
#endif
#endif
        struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
        struct page *first_page;        /* Compound tail pages */
    };

    /*
     * On machines where all RAM is mapped into kernel address space,
     * we can simply calculate the virtual address. On machines with
     * highmem some memory is mapped into kernel virtual memory
     * dynamically, so we need a place to store that address.
     * Note that this field could be 16 bits on x86 ... ;)
     *
     * Architectures with slow multiplication can define
     * WANT_PAGE_VIRTUAL in asm/page.h
     */
#if defined(WANT_PAGE_VIRTUAL)
    void *virtual;              /* Kernel virtual address (NULL if
                                   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
    unsigned long debug_flags;  /* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
    /*
     * kmemcheck wants to track the status of each byte in a page; this
     * is a pointer to such a status block. NULL if not tracked.
     */
    void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
    int _last_cpupid;
#endif
};

    (This structure contains many unions, mainly so that different algorithms can reuse the same space for different data; only some of the common members are noted here for now.)

    unsigned long flags;

    ——Records the page frame's type and state flags;

    struct address_space *mapping;

    ——Used to distinguish whether the page is a file-mapped page frame or an anonymous page frame (see the sketch below);
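    A sketch of the low-bit trick described in the struct's comment: if bit 0 of mapping is set, the pointer (minus the tag) refers to an anon_vma and the page is anonymous; if clear, it refers to an inode's address_space (page cache) or is NULL. This mirrors the kernel's PageAnon() test, but fake_page here is a stand-in, not the real struct page.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_MAPPING_ANON 1UL  /* low bit of page->mapping */

    struct fake_page {
        uintptr_t mapping;  /* address_space* or anon_vma*, tagged in bit 0 */
    };

    static int page_is_anon(const struct fake_page *page)
    {
        return (page->mapping & PAGE_MAPPING_ANON) != 0;
    }

    int main(void)
    {
        struct fake_page file_page = { .mapping = 0x1000 }; /* bit 0 clear */
        struct fake_page anon_page = { .mapping = 0x2000 | PAGE_MAPPING_ANON };

        printf("file_page anon? %d\n", page_is_anon(&file_page)); /* 0 */
        printf("anon_page anon? %d\n", page_is_anon(&anon_page)); /* 1 */
        return 0;
    }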

    atomic_t _mapcount;

    ——Records how many page table entries in the system point to this page;

    atomic_t _count;

    ——The current reference count on this page across the system (a small model of its "unless zero" use follows);
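    The comment in the struct notes that keeping a tail page's _count at zero guarantees get_page_unless_zero() never succeeds on it. Here is a userspace model of that "increment unless zero" primitive using C11 atomics; it is illustrative only (the kernel uses atomic_inc_not_zero() on atomic_t), and the _model names are made up.

    #include <stdio.h>
    #include <stdatomic.h>

    /* Take a reference only if the count is already non-zero, so a
     * count pinned at 0 can never be grabbed. */
    static int get_page_unless_zero_model(atomic_int *count)
    {
        int old = atomic_load(count);
        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return 1;   /* reference taken */
            /* old was reloaded by the failed CAS; retry */
        }
        return 0;           /* count was zero: refuse */
    }

    int main(void)
    {
        atomic_int live = 2, dead = 0;
        printf("live page: %d (count now %d)\n",
               get_page_unless_zero_model(&live), atomic_load(&live));
        printf("dead page: %d (count now %d)\n",
               get_page_unless_zero_model(&dead), atomic_load(&dead));
        return 0;
    }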

    struct list_head lru;

    ——When the page frame is allocated, this member is used in the lists of the zone's lruvec; when the page frame is free, it is used by the buddy algorithm;

    unsigned long private;

    ——A pointer to "private" data. It can be used in different ways depending on the page's purpose, commonly to associate the page with a data buffer;

    void *virtual;

    ——For pages in the highmem region, i.e. pages that cannot be directly mapped, this member stores the page's kernel virtual address;