The best explanation of VM_MIXEDMAP, and an indirect explanation of VM_PFNMAP, comes from the description of the patch commit that introduced it: http://visa.lab.asu.edu/gitlab/fstrace/android-kernel-msm-hammerhead-3.4-marshmallow-mr3/commit/b379d790197cdf8a95fb67507d75a24ac0a1678d
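As a hedged illustration of where VM_PFNMAP usually comes from (this sketch is not from the commit above; demo_mmap and demo_phys_base are hypothetical), a driver that backs its mmap handler with remap_pfn_range() gets VM_IO | VM_PFNMAP set on the vma for it:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical physical base address of some device memory. */
static phys_addr_t demo_phys_base;

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * remap_pfn_range() inserts raw page frame numbers with no
	 * struct page behind them and marks the vma VM_IO | VM_PFNMAP.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       demo_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}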
 */
	const struct vm_operations_struct *vm_ops;	//12 a set of function pointers for operating on this vma

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE units */	//13 this vma's offset within the mapped file
	struct file *vm_file;		/* File we map to (can be ...
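A minimal sketch of how vm_file and vm_pgoff fit together (the helper name below is invented; it mirrors the arithmetic of the kernel's linear_page_index()): the page index of a user address inside the mapped file is its page offset into the vma plus vm_pgoff:

#include <linux/mm.h>

/* Illustrative only: map a user virtual address inside this vma back to
 * a page index within vma->vm_file. */
static inline pgoff_t demo_addr_to_file_index(struct vm_area_struct *vma,
					      unsigned long addr)
{
	return ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}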
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/* allocate physical memory for every vma in this address range */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (...
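From user space, this population path is normally reached through mlock() or mmap() with MAP_POPULATE; a minimal example (an assumption for illustration, not code from the post):

#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;

	/* MAP_POPULATE asks the kernel to fault the pages in up front,
	 * which is what drives __mm_populate()/populate_vma_page_range(). */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (p == MAP_FAILED)
		return EXIT_FAILURE;

	munmap(p, len);
	return EXIT_SUCCESS;
}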
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	void (*close)(struct vm_area_struct *area);
	int (*mremap)(struct vm_area_struct *area);
	int (*fault)(struct vm_fault *vmf);
	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
	void (*map_pages)(struct vm_f...
The address vmemmap + pfn is exactly the address of that page frame's struct page.

Linux zoned page frame allocator

The mechanism for allocating page frames inside the kernel is called the zoned page frame allocator. In Linux, the zoned page frame allocator manages all physical memory: whether you are the kernel or a process, you have to ask the zoned page frame allocator, and only then are you handed the physical page frames you are entitled to. When the page frames you hold are not ...
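A hedged sketch that ties the two points together (it assumes CONFIG_SPARSEMEM_VMEMMAP; demo_page_frame is an invented helper): alloc_pages() is the front door of the zoned page frame allocator, and with the vmemmap layout page_to_pfn()/pfn_to_page() reduce to pointer arithmetic against vmemmap:

#include <linux/gfp.h>
#include <linux/mm.h>

static void demo_page_frame(void)
{
	/* Ask the zoned page frame allocator for one order-0 page frame. */
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	unsigned long pfn;

	if (!page)
		return;

	/* With CONFIG_SPARSEMEM_VMEMMAP, page_to_pfn(page) is just
	 * page - vmemmap, so vmemmap + pfn points back at this struct page. */
	pfn = page_to_pfn(page);
	pr_info("pfn=%#lx, struct page=%p, vmemmap+pfn=%p\n",
		pfn, page, vmemmap + pfn);

	__free_pages(page, 0);
}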
void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);

The members of vm_operations_struct are analyzed below.

1. open function pointer

The function pointed to by open is called when a "virtual memory area" (vma) is created; ...
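As a hedged sketch of how a driver wires these callbacks up (the demo_* names are invented; signatures follow the vm_operations_struct shown above), open/close track the vma's lifetime and fault supplies a page on demand:

#include <linux/mm.h>

static void demo_vma_open(struct vm_area_struct *area)
{
	pr_info("vma open: %#lx-%#lx\n", area->vm_start, area->vm_end);
}

static void demo_vma_close(struct vm_area_struct *area)
{
	pr_info("vma close: %#lx-%#lx\n", area->vm_start, area->vm_end);
}

static int demo_vma_fault(struct vm_fault *vmf)
{
	/* A real driver would look up or allocate the backing page here,
	 * store it in vmf->page and return 0. */
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct demo_vm_ops = {
	.open  = demo_vma_open,
	.close = demo_vma_close,
	.fault = demo_vma_fault,
};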
	 * VM_PFNMAP VMA.
	 *
	 * We should not cow pages in a shared writeable mapping.
	 * Just mark the pages writable and/or call ops->pfn_mkwrite.
	 */
	if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == ...
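A hedged look at the driver side of this path (the name is invented, and the exact pfn_mkwrite signature varies by kernel version; here it takes only struct vm_fault * like the fault handlers above): for a shared writable VM_PFNMAP mapping there is nothing to COW, so the fault code calls ops->pfn_mkwrite and, if it returns 0, simply marks the pte writable:

#include <linux/mm.h>

static int demo_pfn_mkwrite(struct vm_fault *vmf)
{
	/* e.g. record in driver bookkeeping that the pfn backing
	 * vmf->address is about to be written; no struct page is involved. */
	return 0;	/* 0: let the fault handler make the pte writable */
}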
	/*
	 * The /proc/sys/vm/zone_reclaim_mode file controls whether dirty pages may be written
	 * back to disk; even when it allows writeback, fast memory reclaim still never writes
	 * back dirty file pages.
	 * When zone_reclaim_mode is 0, page writeback is not allowed here.
	 */
	.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
	...
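For reference, the bits tested here come from the zone_reclaim_mode bitmask; the definitions below follow mm/vmscan.c, though the exact names and comments vary slightly across kernel versions, so treat this as a sketch:

#define RECLAIM_OFF	0
#define RECLAIM_ZONE	(1 << 0)	/* reclaim from the zone at all */
#define RECLAIM_WRITE	(1 << 1)	/* allow writing out dirty pages during reclaim */
#define RECLAIM_UNMAP	(1 << 2)	/* allow unmapping mapped pages during reclaim */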
kernel image map: maps the addresses the kernel needs at runtime (kernel text, rodata, data, bss, and so on).

arch/arm64/kernel/head.S:

ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	...
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_RWX;

		create_mapping(&map);
	} else if (start >= kernel_x_end) {
		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		...