|
关于kvm中的内存
1.kvm_vcpu_init->kvm_arch_vcpu_init-> kvm_mmu_create
kvm_mmu_create alloc_mmu_pages(vcpu):
alloc_mmu_pages
1 vcpu->arch.mmu.pae_root = page_address(page);//这里是很重要的pae_root在后面会用到
2 for (i = 0; i < 4; ++i)
3 vcpu->arch.mmu.pae_root = INVALID_PAGE
2.kvm_arch_vcpu_setup()->kvm_mmu_setup()->init_kvm_mmu()->init_kvm_tdp_mmu(EPT开启的情况调用这个函数)
???在哪里看了一篇文章说不管什么情况下都按照PAE的情况处理还是???需要继续查询下
/*
 * Configure the vcpu MMU context for two-dimensional paging (TDP, i.e.
 * EPT/NPT).  Hardware walks GPA->HPA directly; gva_to_gpa is still set up
 * so KVM itself can translate guest virtual addresses when it needs to
 * (e.g. instruction emulation).
 */
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.walk_mmu;
context->base_role.word = 0;
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;/* EPT violations land here */
context->free = nonpaging_free;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_x86_ops->get_tdp_level();
context->root_hpa = INVALID_PAGE;// important: root table stays INVALID_PAGE until mmu_alloc_roots() fills it in
context->direct_map = true;// __direct_map path; spte = shadow page table entry
context->set_cr3 = kvm_x86_ops->set_tdp_cr3;// vmx_set_cr3
context->get_cr3 = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;// NOTE(review): unclear whether this is ever reached on the TDP path - confirm
if (!is_paging(vcpu)) {
context->nx = false;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->root_level = 0;
} else if (is_long_mode(vcpu)) {
context->nx = is_nx(vcpu);
context->root_level = PT64_ROOT_LEVEL;// 64-bit long mode: 4-level guest paging
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging64_gva_to_gpa;
} else if (is_pae(vcpu)) {// not a bug: PAE PTEs are 64-bit wide, so the 64-bit walker is reused
context->nx = is_nx(vcpu);
context->root_level = PT32E_ROOT_LEVEL;// 32-bit guest with PAE enabled
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging64_gva_to_gpa;
} else {
context->nx = false;
context->root_level = PT32_ROOT_LEVEL;// 32-bit guest, non-PAE
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging32_gva_to_gpa;
}
update_permission_bitmask(vcpu, context);
update_last_pte_bitmap(vcpu, context);
return 0;
}
3.kvm_arch_vcpu_ioctl_run->__vcpu_run->vcpu_enter_guest(vcpu)->kvm_mmu_reload->kvm_mmu_load(vcpu)
/*
 * Called from vcpu_enter_guest() via kvm_mmu_reload() whenever root_hpa is
 * invalid: allocates the root page table and points the hardware at it.
 */
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
int r;
r = mmu_topup_memory_caches(vcpu);
if (r)
goto out;
r = mmu_alloc_roots(vcpu);/* key step: allocates the root table and sets vcpu->arch.mmu.root_hpa */
spin_lock(&vcpu->kvm->mmu_lock);
mmu_sync_roots(vcpu);// sync unsync'd child sptes under the new root (matters for shadow paging)
spin_unlock(&vcpu->kvm->mmu_lock);
if (r)
goto out;
/* set_cr3() should ensure TLB has been flushed */
vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);// point hardware (EPTP or CR3) at the root table's physical address
out:
return r;
}
这里判断的条件是if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))/*也就是说如果root_hpa为还为INVALID_PAGE 这在后面的tdp_page_fault这个函数中用到,应该是EPT表的地址*/
return 0;->kvm_mmu_load()->mmu_alloc_roots(vcpu)()在这里初始化的vcpu->arch.mmu.root_hpa,root_hpa指向根页表的物理地址,因为在kvm_mmu_create中walk.mmu与arch.mmu指向了相同的结构,所以这里的初始化在后面tdp_page_fault可以直接使用。
mmu_alloc_roots()------|mmu_alloc_direct_roots(vcpu)针对不同情况,这里我猜测这种情况对应为开启EPT
|mmu_alloc_shadow_roots(vcpu)
kvm_mmu_page这个函数还没有分析
//这个函数还是需要分析,现在的问题是这个函数的几个参数意味着这是在做客户机的虚拟地址以及客户机的页框,但是为什么后面还是要分配实际的物理地址,这里要和tdp_page_fault连接起来,下一步需要知道的是tdp_page_fault中是如何使用这里分配的物理地址,并且如何更新SPTE,还有一个比较关键的问题是guest中CR3指向的是不是实际的物理地址上的数据(这点很重要),vm使用的spte在哪里?
eptp的设置是在:
vmx_set_cr3
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{/* Called from kvm_mmu_load() as vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa).
With EPT enabled the 'cr3' argument is actually root_hpa and becomes the
EPT pointer; the guest's own CR3 is written to the VMCS separately. */
unsigned long guest_cr3;
u64 eptp;
guest_cr3 = cr3;
if (enable_ept) {/* EPT enabled */
eptp = construct_eptp(cr3);
vmcs_write64(EPT_POINTER, eptp);// program the EPT root into the VMCS
guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
vcpu->kvm->arch.ept_identity_map_addr;// non-paging guests run on the identity map
ept_load_pdptrs(vcpu);
}
vmx_flush_tlb(vcpu);
vmcs_writel(GUEST_CR3, guest_cr3);
}
Ept页表的建立是通过:tdp_page_fault:(这里应该不涉及到客户机页表缺页的时候的问题,客户机页表没有映射时会调用其他函数这里需要找下)
tdp_page_fault:
/*
 * EPT-violation handler: installs the missing GPA->HPA mapping.
 * The GPA arrives in the gva_t-typed parameter because the common
 * page_fault callback signature is shared with shadow paging.
 */
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
bool prefault)
{
pfn_t pfn;
int r;
int level;
int force_pt_level;
gfn_t gfn = gpa >> PAGE_SHIFT;// guest physical frame number
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));// root was set up earlier by kvm_mmu_load()
if (unlikely(error_code & PFERR_RSVD_MASK))
return handle_mmio_page_fault(vcpu, gpa, error_code, true);
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
if (likely(!force_pt_level)) {
level = mapping_level(vcpu, gfn);// may allow a huge-page mapping
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
} else
level = PT_PAGE_TABLE_LEVEL;
if (fast_page_fault(vcpu, gpa, level, error_code))
return 0;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return 0;// try_async_pf() resolves gfn -> pfn (host physical frame)
if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
r = __direct_map(vcpu, gpa, write, map_writable,// this is where the EPT entries actually get built
level, gfn, pfn, prefault);// gfn: guest frame, pfn: host physical frame
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return 0;
}
完成gpa->hpa的映射,也就是所有客户机的物理地址都需要经过ept的转换。
注意:
__direct_map函数中利用了for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator),其初始化部分(shadow_walk_init)为:iterator->addr = addr;
iterator->shadow_addr = vcpu->arch.mmu.root_hpa;//这里和GUST对应的cr3中地址是一样
iterator->level = vcpu->arch.mmu.shadow_root_level;
if (iterator->level == PT64_ROOT_LEVEL &&
vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
!vcpu->arch.mmu.direct_map)
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
iterator->shadow_addr
= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
--iterator->level;
if (!iterator->shadow_addr)
iterator->level = 0;
}
}
其中使用了iterator->shadow_addr = vcpu->arch.mmu.root_hpa来遍历页表,这里实际遍历的是ept页表,(因为在kvm_mmu_load使用vmx_set_cr3添加了ept页表的指针),也就是说tdp_page_fault处理的是ept的问题。
tdp_page_fault()的调用过程
handle_ept_violation(struct kvm_vcpu *vcpu)->kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);->r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);//tdp_page_fault()
相应的客户机需要完成GVA->GPA
函数调用
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
struct x86_exception *exception)->static int FNAME(walk_addr_generic)(struct guest_walker *walker,struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,gva_t addr, u32 access)(1这个函数中需要注意是否有当出现了客户机页表fault的时候向客户机中注入fault的函数调用,2、gva_to_gpa这个函数是如何被客户机使用的)
问题的解决:在开启ept的情况下,是不会因为客户机的page_fault产生exit,ept完成的是GPA->HPA的转换,而GVA->GPA这个层次的转换是在客户机里面实现的不需要利用exit得到相应的处理,所以应该是不存在相应的客户机的注入fault的函数。但是这里存在的问题是如何才能调用这个完成GVA->GPA转换的函数,
还有一个问题就是
FNAME(walk_addr_generic)(struct guest_walker *walker,struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,gva_t addr, u32 access)现在卡在这个函数上了
1.vcpu->arch.cr3 这里是在哪里赋值的,CR3指向的到底是什么?
http://www.linux-kvm.org/page/Memory
The solution in software is something Linux calls mmu_notifiers. Since the qemu/kvm memory is normal Linux memory (from the host Linux kernel's perspective) the kernel may try to swap it, replace it, or even free it just like normal memory.
But, before the pages are actually given back to the host kernel for other use, the kvm/qemu guest is notified of the host's intentions. The kvm/qemu guest can then remove the page from the shadow page tables or the NPT/EPT structures. After the kvm/qemu guest has done this, the host kernel is then free to do what it wishes with the page.
下面是札记
KVM maintains a copy of Shadow Page Tables for each Virtual Machine it runs
TDP: Two Dimensional Paging ( A common term to EPT and NPT )
---恢复内容结束---
关于kvm中的内存
1.kvm_vcpu_init->kvm_arch_vcpu_init-> kvm_mmu_create
kvm_mmu_create alloc_mmu_pages(vcpu):
alloc_mmu_pages
1 vcpu->arch.mmu.pae_root = page_address(page);//这里是很重要的pae_root在后面会用到
2 for (i = 0; i < 4; ++i)
3 vcpu->arch.mmu.pae_root = INVALID_PAGE
2.kvm_arch_vcpu_setup()->kvm_mmu_setup()->init_kvm_mmu()->init_kvm_tdp_mmu(EPT开启的情况调用这个函数)
???在哪里看了一篇文章说不管什么情况下都按照PAE的情况处理还是???需要继续查询下
/*
 * Configure the vcpu MMU context for two-dimensional paging (TDP, i.e.
 * EPT/NPT).  Hardware walks GPA->HPA directly; gva_to_gpa is still set up
 * so KVM itself can translate guest virtual addresses when it needs to
 * (e.g. instruction emulation).
 */
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.walk_mmu;
context->base_role.word = 0;
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;/* EPT violations land here */
context->free = nonpaging_free;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_x86_ops->get_tdp_level();
context->root_hpa = INVALID_PAGE;// important: root table stays INVALID_PAGE until mmu_alloc_roots() fills it in
context->direct_map = true;// __direct_map path; spte = shadow page table entry
context->set_cr3 = kvm_x86_ops->set_tdp_cr3;// vmx_set_cr3
context->get_cr3 = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;// NOTE(review): unclear whether this is ever reached on the TDP path - confirm
if (!is_paging(vcpu)) {
context->nx = false;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->root_level = 0;
} else if (is_long_mode(vcpu)) {
context->nx = is_nx(vcpu);
context->root_level = PT64_ROOT_LEVEL;// 64-bit long mode: 4-level guest paging
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging64_gva_to_gpa;
} else if (is_pae(vcpu)) {// not a bug: PAE PTEs are 64-bit wide, so the 64-bit walker is reused
context->nx = is_nx(vcpu);
context->root_level = PT32E_ROOT_LEVEL;// 32-bit guest with PAE enabled
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging64_gva_to_gpa;
} else {
context->nx = false;
context->root_level = PT32_ROOT_LEVEL;// 32-bit guest, non-PAE
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging32_gva_to_gpa;
}
update_permission_bitmask(vcpu, context);
update_last_pte_bitmap(vcpu, context);
return 0;
}
3.kvm_arch_vcpu_ioctl_run->__vcpu_run->vcpu_enter_guest(vcpu)->kvm_mmu_reload->kvm_mmu_load(vcpu)
/*
 * Called from vcpu_enter_guest() via kvm_mmu_reload() whenever root_hpa is
 * invalid: allocates the root page table and points the hardware at it.
 */
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
int r;
r = mmu_topup_memory_caches(vcpu);
if (r)
goto out;
r = mmu_alloc_roots(vcpu);/* key step: allocates the root table and sets vcpu->arch.mmu.root_hpa */
spin_lock(&vcpu->kvm->mmu_lock);
mmu_sync_roots(vcpu);// sync unsync'd child sptes under the new root (matters for shadow paging)
spin_unlock(&vcpu->kvm->mmu_lock);
if (r)
goto out;
/* set_cr3() should ensure TLB has been flushed */
vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);// point hardware (EPTP or CR3) at the root table's physical address
out:
return r;
}
这里判断的条件是if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))/*也就是说如果root_hpa为还为INVALID_PAGE 这在后面的tdp_page_fault这个函数中用到,应该是EPT表的地址*/
return 0;->kvm_mmu_load()->mmu_alloc_roots(vcpu)()在这里初始化的vcpu->arch.mmu.root_hpa,root_hpa指向根页表的物理地址,因为在kvm_mmu_create中walk.mmu与arch.mmu指向了相同的结构,所以这里的初始化在后面tdp_page_fault可以直接使用。
mmu_alloc_roots()------|mmu_alloc_direct_roots(vcpu)针对不同情况,这里我猜测这种情况对应为开启EPT
|mmu_alloc_shadow_roots(vcpu)
kvm_mmu_page这个函数还没有分析
//这个函数还是需要分析,现在的问题是这个函数的几个参数意味着这是在做客户机的虚拟地址以及客户机的页框,但是为什么后面还是要分配实际的物理地址,这里要和tdp_page_fault连接起来,下一步需要知道的是tdp_page_fault中是如何使用这里分配的物理地址,并且如何更新SPTE,还有一个比较关键的问题是guest中CR3指向的是不是实际的物理地址上的数据(这点很重要),vm使用的spte在哪里?
eptp的设置是在:
vmx_set_cr3
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{/* Called from kvm_mmu_load() as vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa).
With EPT enabled the 'cr3' argument is actually root_hpa and becomes the
EPT pointer; the guest's own CR3 is written to the VMCS separately. */
unsigned long guest_cr3;
u64 eptp;
guest_cr3 = cr3;
if (enable_ept) {/* EPT enabled */
eptp = construct_eptp(cr3);
vmcs_write64(EPT_POINTER, eptp);// program the EPT root into the VMCS
guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
vcpu->kvm->arch.ept_identity_map_addr;// non-paging guests run on the identity map
ept_load_pdptrs(vcpu);
}
vmx_flush_tlb(vcpu);
vmcs_writel(GUEST_CR3, guest_cr3);
}
Ept页表的建立是通过:tdp_page_fault:(这里应该不涉及到客户机页表缺页的时候的问题,客户机页表没有映射时会调用其他函数这里需要找下)
tdp_page_fault:
/*
 * EPT-violation handler: installs the missing GPA->HPA mapping.
 * The GPA arrives in the gva_t-typed parameter because the common
 * page_fault callback signature is shared with shadow paging.
 */
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
bool prefault)
{
pfn_t pfn;
int r;
int level;
int force_pt_level;
gfn_t gfn = gpa >> PAGE_SHIFT;// guest physical frame number
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));// root was set up earlier by kvm_mmu_load()
if (unlikely(error_code & PFERR_RSVD_MASK))
return handle_mmio_page_fault(vcpu, gpa, error_code, true);
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
if (likely(!force_pt_level)) {
level = mapping_level(vcpu, gfn);// may allow a huge-page mapping
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
} else
level = PT_PAGE_TABLE_LEVEL;
if (fast_page_fault(vcpu, gpa, level, error_code))
return 0;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return 0;// try_async_pf() resolves gfn -> pfn (host physical frame)
if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
r = __direct_map(vcpu, gpa, write, map_writable,// this is where the EPT entries actually get built
level, gfn, pfn, prefault);// gfn: guest frame, pfn: host physical frame
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return 0;
}
完成gpa->hpa的映射,也就是所有客户机的物理地址都需要经过ept的转换。
注意:
__direct_map函数中利用了for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator),其初始化部分(shadow_walk_init)为:iterator->addr = addr;
iterator->shadow_addr = vcpu->arch.mmu.root_hpa;//这里和GUST对应的cr3中地址是一样
iterator->level = vcpu->arch.mmu.shadow_root_level;
if (iterator->level == PT64_ROOT_LEVEL &&
vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
!vcpu->arch.mmu.direct_map)
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
iterator->shadow_addr
= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
--iterator->level;
if (!iterator->shadow_addr)
iterator->level = 0;
}
}
其中使用了iterator->shadow_addr = vcpu->arch.mmu.root_hpa来遍历页表,这里实际遍历的是ept页表,(因为在kvm_mmu_load使用vmx_set_cr3添加了ept页表的指针),也就是说tdp_page_fault处理的是ept的问题。
tdp_page_fault()的调用过程
handle_ept_violation(struct kvm_vcpu *vcpu)->kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);->r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);//tdp_page_fault()
相应的客户机需要完成GVA->GPA
函数调用
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
struct x86_exception *exception)->static int FNAME(walk_addr_generic)(struct guest_walker *walker,struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,gva_t addr, u32 access)(1这个函数中需要注意是否有当出现了客户机页表fault的时候向客户机中注入fault的函数调用,2、gva_to_gpa这个函数是如何被客户机使用的)
问题的解决:在开启ept的情况下,是不会因为客户机的page_fault产生exit,ept完成的是GPA->HPA的转换,而GVA->GPA这个层次的转换是在客户机里面实现的不需要利用exit得到相应的处理,所以应该是不存在相应的客户机的注入fault的函数。但是这里存在的问题是如何才能调用这个完成GVA->GPA转换的函数,
还有一个问题就是
FNAME(walk_addr_generic)(struct guest_walker *walker,struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,gva_t addr, u32 access)现在卡在这个函数上了
1.vcpu->arch.cr3 这里是在哪里赋值的,CR3指向的到底是什么?
http://www.linux-kvm.org/page/Memory
The solution in software is something Linux calls mmu_notifiers. Since the qemu/kvm memory is normal Linux memory (from the host Linux kernel's perspective) the kernel may try to swap it, replace it, or even free it just like normal memory.
But, before the pages are actually given back to the host kernel for other use, the kvm/qemu guest is notified of the host's intentions. The kvm/qemu guest can then remove the page from the shadow page tables or the NPT/EPT structures. After the kvm/qemu guest has done this, the host kernel is then free to do what it wishes with the page.
下面是札记
KVM maintains a copy of Shadow Page Tables for each Virtual Machine it runs
TDP: Two Dimensional Paging ( A common term to EPT and NPT )
下面是分析影子页表:
handle_exception(struct kvm_vcpu *vcpu)->kvm_mmu_page_fault->static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
bool prefault)
/*
 * Shadow-paging #PF handler (EPT disabled): reached via handle_exception()
 * -> kvm_mmu_page_fault().  This listing keeps the original source line
 * numbers (508-587) from the pasted kernel excerpt.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
509 bool prefault)
510 {
511 int write_fault = error_code & PFERR_WRITE_MASK;
512 int user_fault = error_code & PFERR_USER_MASK;
513 struct guest_walker walker;
514 int r;
515 pfn_t pfn;
516 int level = PT_PAGE_TABLE_LEVEL;
517 int force_pt_level;
518 unsigned long mmu_seq;
519 bool map_writable;
520
521 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
522
523 if (unlikely(error_code & PFERR_RSVD_MASK))
524 return handle_mmio_page_fault(vcpu, addr, error_code,
525 mmu_is_nested(vcpu));
526
527 r = mmu_topup_memory_caches(vcpu);
528 if (r)
529 return r;
530
531 /*
532 * Look up the guest pte for the faulting address.
533 */
534 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);// walk the guest's own page tables
535
536 /*
537 * The page is not mapped by the guest. Let the guest handle it.
538 */
539 if (!r) {
540 pgprintk("%s: guest page fault\n", __func__);
541 if (!prefault)
542 inject_page_fault(vcpu, &walker.fault);/* re-inject the #PF into the guest */
543
544 return 0;
545 }
546
547 if (walker.level >= PT_DIRECTORY_LEVEL)
548 force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
549 else
550 force_pt_level = 1;
551 if (!force_pt_level) {
552 level = min(walker.level, mapping_level(vcpu, walker.gfn));
553 walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
554 }
555
556 mmu_seq = vcpu->kvm->mmu_notifier_seq;
557 smp_rmb();
558
559 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
560 &map_writable))/* gfn -> host physical frame */
561 return 0;
562
563 if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
564 walker.gfn, pfn, walker.pte_access, &r))
565 return r;
566
567 spin_lock(&vcpu->kvm->mmu_lock);
568 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
569 goto out_unlock;
570
571 kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
572 kvm_mmu_free_some_pages(vcpu);
573 if (!force_pt_level)
574 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
575 r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
576 level, pfn, map_writable, prefault);/* installs/updates the shadow ptes */
577 ++vcpu->stat.pf_fixed;
578 kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
579 spin_unlock(&vcpu->kvm->mmu_lock);
580
581 return r;
582
583 out_unlock:
584 spin_unlock(&vcpu->kvm->mmu_lock);
585 kvm_release_pfn_clean(pfn);
586 return 0;
587 }
/*
 * Guest page-table walker: software-emulates the guest MMU to translate a
 * guest virtual address into a guest frame number, recording each level's
 * pte in 'walker'.  Listing keeps the original numbering from the paste;
 * the function's closing brace is not part of this excerpt.
 */
1 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
2 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3 gva_t addr, u32 access)
4 {
5 int ret;
6 pt_element_t pte;
7 pt_element_t __user *uninitialized_var(ptep_user);
8 gfn_t table_gfn;
9 unsigned index, pt_access, pte_access, accessed_dirty, shift;
10 gpa_t pte_gpa;
11 int offset;
12 const int write_fault = access & PFERR_WRITE_MASK;
13 const int user_fault = access & PFERR_USER_MASK;
14 const int fetch_fault = access & PFERR_FETCH_MASK;
15 u16 errcode = 0;
16 gpa_t real_gpa;
17 gfn_t gfn;
18
19 trace_kvm_mmu_pagetable_walk(addr, access);
20 retry_walk:
21 walker->level = mmu->root_level;
22 pte = mmu->get_cr3(vcpu);// guest CR3 value: this is a guest physical address
23
24 #if PTTYPE == 64
25 if (walker->level == PT32E_ROOT_LEVEL) {
26 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);// kvm_pdptr_read: cached PDPTE for PAE guests
27 trace_kvm_mmu_paging_element(pte, walker->level);
28 if (!is_present_gpte(pte))
29 goto error;
30 --walker->level;
31 }
32 #endif
33 walker->max_level = walker->level;
34 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
35 (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
36
37 accessed_dirty = PT_ACCESSED_MASK;
38 pt_access = pte_access = ACC_ALL;
39 ++walker->level;
40
41 do {
42 gfn_t real_gfn;
43 unsigned long host_addr;
44
45 pt_access &= pte_access;
46 --walker->level;
47
48 index = PT_INDEX(addr, walker->level);
49
50 table_gfn = gpte_to_gfn(pte);// guest frame of the current page table
51 offset = index * sizeof(pt_element_t);
52 pte_gpa = gfn_to_gpa(table_gfn) + offset;
53 walker->table_gfn[walker->level - 1] = table_gfn;// GPA; used later by FNAME(fetch) to hash-look-up the kvm_mmu_page
54 walker->pte_gpa[walker->level - 1] = pte_gpa;
55
56 real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
57 PFERR_USER_MASK|PFERR_WRITE_MASK);
58 if (unlikely(real_gfn == UNMAPPED_GVA))
59 goto error;
60 real_gfn = gpa_to_gfn(real_gfn);
61
62 host_addr = gfn_to_hva(vcpu->kvm, real_gfn);// gfn -> host virtual address (via memslots)
63 if (unlikely(kvm_is_error_hva(host_addr)))
64 goto error;
65
66 ptep_user = (pt_element_t __user *)((void *)host_addr + offset);// HVA of the pte; address arithmetic only, no memory access yet
67 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))// read the guest pte through the host (qemu) process page tables
68 goto error;
69 walker->ptep_user[walker->level - 1] = ptep_user;// user-space (HVA) pointer, saved for later accessed/dirty-bit updates
70
71 trace_kvm_mmu_paging_element(pte, walker->level);
72
73 if (unlikely(!is_present_gpte(pte)))
74 goto error;
75
76 if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
77 walker->level))) {
78 errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
79 goto error;
80 }
81
82 accessed_dirty &= pte;
83 pte_access = pt_access & gpte_access(vcpu, pte);
84
85 walker->ptes[walker->level - 1] = pte;
86 } while (!is_last_gpte(mmu, walker->level, pte));
87
88 if (unlikely(permission_fault(mmu, pte_access, access))) {
89 errcode |= PFERR_PRESENT_MASK;
90 goto error;
91 }
92
93 gfn = gpte_to_gfn_lvl(pte, walker->level);
94 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
95
96 if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
97 gfn += pse36_gfn_delta(pte);
98
99 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);// presumably identity unless nested - confirm
100 if (real_gpa == UNMAPPED_GVA)
101 return 0;
102
103 walker->gfn = real_gpa >> PAGE_SHIFT;// final gfn; caller then maps gfn -> HVA -> HPA to update the shadow tables
104
105 if (!write_fault)
106 protect_clean_gpte(&pte_access, pte);
107
108 /*
109 * On a write fault, fold the dirty bit into accessed_dirty by shifting it one
110 * place right.
111 *
112 * On a read fault, do nothing.
113 */
114 shift = write_fault >> ilog2(PFERR_WRITE_MASK);
115 shift *= PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT;
116 accessed_dirty &= pte >> shift;
117
118 if (unlikely(!accessed_dirty)) {
119 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
120 if (unlikely(ret < 0))
121 goto error;
122 else if (ret)
123 goto retry_walk;
124 }
125
126 walker->pt_access = pt_access;
127 walker->pte_access = pte_access;
128 pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
129 __func__, (u64)pte, pte_access, pt_access);
130 return 1;
131
132 error:
133 errcode |= write_fault | user_fault;// all error paths below reflect faults in the guest's own page tables
134 if (fetch_fault && (mmu->nx ||
135 kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
136 errcode |= PFERR_FETCH_MASK;
137
138 walker->fault.vector = PF_VECTOR;
139 walker->fault.error_code_valid = true;
140 walker->fault.error_code = errcode;
141 walker->fault.address = addr;
142 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
143
144 trace_kvm_mmu_walker_error(walker->fault.error_code);
145 return 0;
实际客户机使用的页表,也是存在于内存中的,和影子页表不同,在产生缺页时,首先遍历客户机的页表,查找是否存在客户机虚拟地址->客户机物理地址的映射(FNAME(walk_addr_generic)),通过
try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, &map_writable)可以得GPA->HPA的映射,得到宿主机页框,由此在根据宿主机页框更新影子页表。
GPA->HPA的查找:GPA->HVA(memslot),HVA->HPA(查找QEMU进程对应的页表吧)。现在的问题是
对上面函数的说明:
walk_addr_generic 得到客户机页表CR3(GPA),根据分页机制得到一级,二级页表项,这里的页表项只是通过下面代码
offset = index * sizeof(pt_element_t); pte_gpa = gfn_to_gpa(table_gfn) + offset
计算出了对应的GPA,然后通过host_addr = gfn_to_hva(vcpu->kvm, real_gfn);将其转化成为HVA ,由下面ptep_user = (pt_element_t __user *)((void *)host_addr + offset);//HVA,得到相应的页表项对应的HVA->
if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))这里通过了_copy_from_user得到了相应,ptep_user(HVA)对应的HPA中的数据(这里的映射是通过qemu进程页表完成的)。
cr3在哪里进行的赋值
关于影子页表的建立过程:
这里只是针对客户机以及存在了相应的页表项,page_fault是由于shallow不存在相应的映射,在利用walk_addr_generic()中得到的walker->gfn = real_gpa >> PAGE_SHIFT,
|
|