1

The flags that can be passed to the mmap system call:

#define MAP_SHARED    0x01        /* Share changes */
#define MAP_PRIVATE   0x02        /* Changes are private */
#define MAP_TYPE      0x0f        /* Mask for type of mapping */
#define MAP_FIXED     0x10        /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20        /* don't use a file */
#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
# define MAP_UNINITIALIZED 0x4000000    /* For anonymous mmap, memory could be uninitialized */
#else
# define MAP_UNINITIALIZED 0x0          /* Don't support this flag */
#endif

MAP_SHARED

Used to share access to a file among multiple processes; changes made through the mapping are visible to the file and to the other processes.

MAP_PRIVATE

Used to create a private mapping detached from its data source; writes to the region do not affect the contents of the backing file.

MAP_FIXED

Used to create the mapping exactly at the specified linear address; the kernel is not allowed to relocate it.

MAP_ANONYMOUS

Used to create a mapping that is not associated with any file, i.e. a mapping with no data source.

do_anonymous_page calls alloc_zeroed_user_highpage_movable to allocate a memory page initialized to all zeros.
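As a quick user-space illustration (a minimal sketch, not taken from the kernel sources): a MAP_PRIVATE | MAP_ANONYMOUS mapping has no backing file, and when its pages are first touched the anonymous-page path hands back zero-filled memory, so the region reads back as zeros.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;

    /* no file, no offset: an anonymous private mapping */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* the freshly mapped page is zero-filled on first access */
    printf("first byte: %d\n", p[0]);    /* prints 0 */

    munmap(p, len);
    return 0;
}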

2

 

In the definition of struct vm_area_struct there is a doubly linked list node, anon_vma_chain:

struct vm_area_struct {
......
    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages.    A MAP_SHARED vma
     * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                      * page_table_lock */
    struct anon_vma *anon_vma;       /* Serialized by page_table_lock */
......
}

The definition of struct anon_vma:

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
    struct anon_vma *root;    /* Root of this anon_vma tree */
    struct mutex mutex;       /* Serialize access to vma list */
    /*
     * The refcount is taken on an anon_vma when there is no
     * guarantee that the vma of page tables will exist for
     * the duration of the operation. A caller that takes
     * the reference is responsible for clearing up the
     * anon_vma if they are the last user on release
     */
    atomic_t refcount;

    /*
     * NOTE: the LSB of the head.next is set by
     * mm_take_all_locks() _after_ taking the above lock. So the
     * head must only be read/written after taking the above lock
     * to be sure to see a valid next pointer. The LSB bit itself
     * is serialized by a system wide lock only visible to
     * mm_take_all_locks() (mm_all_locks_mutex).
     */
    struct list_head head;    /* Chain of private "related" vmas */
};
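The vma and the anon_vma are tied together by struct anon_vma_chain, whose nodes sit on both the vma's anon_vma_chain list and the anon_vma's list of related vmas. For reference, in kernels of this era (include/linux/rmap.h) the structure looks roughly like this; the exact lock comments vary between versions:

struct anon_vma_chain {
    struct vm_area_struct *vma;
    struct anon_vma *anon_vma;
    struct list_head same_vma;         /* locked by mmap_sem & page_table_lock */
    struct list_head same_anon_vma;    /* locked by anon_vma->mutex */
};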

3

do_mmap

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
    unsigned long len, unsigned long prot,
    unsigned long flag, unsigned long offset)
{
    unsigned long ret = -EINVAL;
    if ((offset + PAGE_ALIGN(len)) < offset)
        goto out;
    if (!(offset & ~PAGE_MASK))
        ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
    return ret;
}

if ((offset + PAGE_ALIGN(len)) < offset)

 

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/*
 * 'kernel.h' contains some often-used function prototypes etc
 */
#define ALIGN(x, a)                 __ALIGN_KERNEL((x), (a))

#define __ALIGN_KERNEL(x, a)        __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))

if ((offset + (((len) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))) < offset)

This checks for overflow: if len is so large that rounding it up to a page boundary and adding it to offset wraps around, there is not enough linear address space left for the mapping, so the call fails.
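A quick worked example of how this check fires, using hypothetical values and assuming a 64-bit unsigned long and PAGE_SIZE of 4096:

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_ALIGN(x)  (((x) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))

int main(void)
{
    /* hypothetical values: a file offset near the top of the address space */
    unsigned long offset = 0xFFFFFFFFFFFFE000UL;
    unsigned long len    = 0x4000UL;    /* 16 KiB, already page-aligned */

    /* offset + PAGE_ALIGN(len) wraps around to 0x2000, which is < offset */
    printf("overflow detected: %d\n", (offset + PAGE_ALIGN(len)) < offset);
    return 0;
}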

if (!(offset & ~PAGE_MASK))

 

If offset lies exactly on a page boundary (its low PAGE_SHIFT bits are all zero), continue.

ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);

 

The last argument is the index of the first mapped page within the file: for example, with 4 KiB pages an offset of 0x3000 gives pgoff = 3.

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
            unsigned long len, unsigned long prot,
            unsigned long flags, unsigned long pgoff)
{
    struct mm_struct * mm = current->mm;
    struct inode *inode;
    vm_flags_t vm_flags;
    int error;
    unsigned long reqprot = prot;

    /*
     * Does the application expect PROT_READ to imply PROT_EXEC?
     *
     * (the exception is when the underlying filesystem is noexec
     *  mounted, in which case we dont add PROT_EXEC.)
     */
    if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
        if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
            prot |= PROT_EXEC;

    if (!len)
        return -EINVAL;

    if (!(flags & MAP_FIXED))
        addr = round_hint_to_min(addr);

    /* Careful about overflows.. */
    len = PAGE_ALIGN(len);
    if (!len)
        return -ENOMEM;

    /* offset overflow? */
    if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
        return -EOVERFLOW;

    /* Too many mappings? */
    if (mm->map_count > sysctl_max_map_count)
        return -ENOMEM;

    /* Obtain the address to map to. we verify (or select) it and ensure
     * that it represents a valid section of the address space.
     */
    addr = get_unmapped_area(file, addr, len, pgoff, flags);
    if (addr & ~PAGE_MASK)
        return addr;

    /* Do simple checking here so the lower-level routines won't have
     * to. we assume access permissions have been handled by the open
     * of the memory object, so we don't do any here.
     */
    vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
            mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

    if (flags & MAP_LOCKED)
        if (!can_do_mlock())
            return -EPERM;

    /* mlock MCL_FUTURE? */
    if (vm_flags & VM_LOCKED) {
        unsigned long locked, lock_limit;
        locked = len >> PAGE_SHIFT;
        locked += mm->locked_vm;
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
            return -EAGAIN;
    }

    inode = file ? file->f_path.dentry->d_inode : NULL;

    if (file) {
        switch (flags & MAP_TYPE) {
        case MAP_SHARED:
            if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                return -EACCES;

            /*
             * Make sure we don't allow writing to an append-only
             * file..
             */
            if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                return -EACCES;

            /*
             * Make sure there are no mandatory locks on the file.
             */
            if (locks_verify_locked(inode))
                return -EAGAIN;

            vm_flags |= VM_SHARED | VM_MAYSHARE;
            if (!(file->f_mode & FMODE_WRITE))
                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

            /* fall through */
        case MAP_PRIVATE:
            if (!(file->f_mode & FMODE_READ))
                return -EACCES;
            if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                if (vm_flags & VM_EXEC)
                    return -EPERM;
                vm_flags &= ~VM_MAYEXEC;
            }

            if (!file->f_op || !file->f_op->mmap)
                return -ENODEV;
            break;

        default:
            return -EINVAL;
        }
    } else {
        switch (flags & MAP_TYPE) {
        case MAP_SHARED:
            /*
             * Ignore pgoff.
             */
            pgoff = 0;
            vm_flags |= VM_SHARED | VM_MAYSHARE;
            break;
        case MAP_PRIVATE:
            /*
             * Set pgoff according to addr for anon_vma.
             */
            pgoff = addr >> PAGE_SHIFT;
            break;
        default:
            return -EINVAL;
        }
    }

    error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
    if (error)
        return error;

    return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
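The comment at the top notes that the caller must already hold mm->mmap_sem for writing. In kernels of this era the system-call path does roughly the following (a simplified sketch of sys_mmap_pgoff; the real code also resolves the fd to a struct file and handles MAP_HUGETLB before taking the lock):

    down_write(&current->mm->mmap_sem);
    retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
    up_write(&current->mm->mmap_sem);
    return retval;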

    /* Obtain the address to map to. we verify (or select) it and ensure
     * that it represents a valid section of the address space.
     */
    addr = get_unmapped_area(file, addr, len, pgoff, flags);
    if (addr & ~PAGE_MASK)
        return addr;

get_unmapped_area looks for a free linear address range large enough to hold the requested vma and returns the start address of that range. If the returned value is not page-aligned, it is not a usable address but an error code (for example -ENOMEM), so the function stops here and simply returns it.

But the question is: if we just return here, will the caller skip the check, assume the kernel has completed the mmap operation, and then try to read or write a region that has never been associated with the file, with unpredictable results?
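In practice the caller does get a chance to notice: the error value propagates up through the mmap system call to user space, where the C library's mmap() returns MAP_FAILED and sets errno, so a well-behaved caller never touches the region. A minimal user-space sketch (the helper name map_file is hypothetical):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

/* hypothetical helper: map the first 'len' bytes of an already-open fd */
static void *map_file(int fd, size_t len)
{
    void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) {
        /* the kernel's error code (e.g. -ENOMEM) arrives here as errno */
        perror("mmap");
        return NULL;
    }
    return p;    /* only dereference on success */
}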

 

[Following the idea in http://www.cnblogs.com/long123king/p/3502170.html: when the process actually accesses a page it triggers a Page Fault, so the key in this step is to set up the appropriate Page Fault handler and the corresponding struct pointer members.]
