题外话:本人对Linux内核的了解尚浅,如有差池欢迎指正,也欢迎提问交流!

 

 

首先要理解一下每个进程是如何维护自己独立的寻址空间的。以我的电脑为例,物理内存是8G。了解过的朋友应该都知道,这是由虚拟内存技术解决的问题;然而在Linux中,具体是用怎样的模型来实现操作系统的这一设计需求的呢?让我们从Linux源码的片段开始看吧!(以下内核源码均来自Fedora 21 64位系统的3.19.3版本内核。)

<include/linux/mm_types.h>中有对物理页面的定义struct page,即物理页框的描述符(注意:它描述的是一个物理页框本身,并不是我们常说的页表)。关于该结构体中每个变量/位的操作函数,大部分在<include/linux/mm.h>中。

 

  1 struct page {
    /*
     * NOTE(review): struct page is the kernel's descriptor for ONE physical
     * page frame -- it is not a page-table entry.  The heavy use of unions
     * keeps the descriptor small: the same machine words are reinterpreted
     * depending on whether the frame currently backs page-cache data,
     * anonymous memory, or a slab allocator's storage.
     */
  2     /* First double word block */
  3     unsigned long flags;        /* Atomic flags, some possibly
  4                      * updated asynchronously */
  5     union {
  6         struct address_space *mapping;    /* If low bit clear, points to
  7                          * inode address_space, or NULL.
  8                          * If page mapped as anonymous
  9                          * memory, low bit is set, and
 10                          * it points to anon_vma object:
 11                          * see PAGE_MAPPING_ANON below.
 12                          */
 13         void *s_mem;            /* slab first object */
 14     };
 15 
 16     /* Second double word */
 17     struct {
 18         union {
 19             pgoff_t index;        /* Our offset within mapping. */
 20             void *freelist;        /* sl[aou]b first free object */
 21             bool pfmemalloc;    /* If set by the page allocator,
 22                          * ALLOC_NO_WATERMARKS was set
 23                          * and the low watermark was not
 24                          * met implying that the system
 25                          * is under some pressure. The
 26                          * caller should try ensure
 27                          * this page is only used to
 28                          * free other pages.
 29                          */
 30         };
 31 
 32         union {
 33 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 34     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 35             /* Used for cmpxchg_double in slub */
 36             unsigned long counters;
 37 #else
 38             /*
 39              * Keep _count separate from slub cmpxchg_double data.
 40              * As the rest of the double word is protected by
 41              * slab_lock but _count is not.
 42              */
 43             unsigned counters;
 44 #endif
 45 
 46             struct {
 47 
 48                 union {
 49                     /*
 50                      * Count of ptes mapped in
 51                      * mms, to show when page is
 52                      * mapped & limit reverse map
 53                      * searches.
 54                      *
 55                      * Used also for tail pages
 56                      * refcounting instead of
 57                      * _count. Tail pages cannot
 58                      * be mapped and keeping the
 59                      * tail page _count zero at
 60                      * all times guarantees
 61                      * get_page_unless_zero() will
 62                      * never succeed on tail
 63                      * pages.
 64                      */
 65                     atomic_t _mapcount;
 66 
 67                     struct { /* SLUB */
 68                         unsigned inuse:16;
 69                         unsigned objects:15;
 70                         unsigned frozen:1;
 71                     };
 72                     int units;    /* SLOB */
 73                 };
 74                 atomic_t _count;        /* Usage count, see below. */
 75             };
 76             unsigned int active;    /* SLAB */
 77         };
 78     };
 79 
 80     /* Third double word block */
 81     union {
 82         struct list_head lru;    /* Pageout list, eg. active_list
 83                      * protected by zone->lru_lock !
 84                      * Can be used as a generic list
 85                      * by the page owner.
 86                      */
 87         struct {        /* slub per cpu partial pages */
 88             struct page *next;    /* Next partial slab */
 89 #ifdef CONFIG_64BIT
 90             int pages;    /* Nr of partial slabs left */
 91             int pobjects;    /* Approximate # of objects */
 92 #else
 93             short int pages;
 94             short int pobjects;
 95 #endif
 96         };
 97 
 98         struct slab *slab_page; /* slab fields */
 99         struct rcu_head rcu_head;    /* Used by SLAB
100                          * when destroying via RCU
101                          */
102 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
103         pgtable_t pmd_huge_pte; /* protected by page->ptl */
104 #endif
105     };
106 
107     /* Remainder is not double word aligned */
108     union {
109         unsigned long private;        /* Mapping-private opaque data:
110                           * usually used for buffer_heads
111                          * if PagePrivate set; used for
112                          * swp_entry_t if PageSwapCache;
113                          * indicates order in the buddy
114                          * system if PG_buddy is set.
115                          */
116 #if USE_SPLIT_PTE_PTLOCKS
117 #if ALLOC_SPLIT_PTLOCKS
118         spinlock_t *ptl;
119 #else
120         spinlock_t ptl;
121 #endif
122 #endif
123         struct kmem_cache *slab_cache;    /* SL[AU]B: Pointer to slab */
124         struct page *first_page;    /* Compound tail pages */
125     };
126 
127 #ifdef CONFIG_MEMCG
128     struct mem_cgroup *mem_cgroup;
129 #endif
130 
131     /*
132      * On machines where all RAM is mapped into kernel address space,
133      * we can simply calculate the virtual address. On machines with
134      * highmem some memory is mapped into kernel virtual memory
135      * dynamically, so we need a place to store that address.
136      * Note that this field could be 16 bits on x86 ... ;)
137      *
138      * Architectures with slow multiplication can define
139      * WANT_PAGE_VIRTUAL in asm/page.h
140      */
141 #if defined(WANT_PAGE_VIRTUAL)
142     void *virtual;            /* Kernel virtual address (NULL if
143                        not kmapped, ie. highmem) */
144 #endif /* WANT_PAGE_VIRTUAL */
145 
146 #ifdef CONFIG_KMEMCHECK
147     /*
148      * kmemcheck wants to track the status of each byte in a page; this
149      * is a pointer to such a status block. NULL if not tracked.
150      */
151     void *shadow;
152 #endif
153 
154 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
155     int _last_cpupid;
156 #endif
157 }
    /*
     * NOTE(review): the apparently-missing ';' after the closing brace is
     * not a typo in the kernel -- in mm_types.h an optional
     * __aligned(2 * sizeof(unsigned long)) attribute (gated on
     * CONFIG_HAVE_ALIGNED_STRUCT_PAGE) follows the brace before the final
     * semicolon; this quoted listing is simply truncated there.
     */
View Code

相关文章: