ref->pfn = pfn;
ref->flags = E500_TLB_VALID;
- if (tlbe_is_writable(gtlbe)) {
- ref->flags |= E500_TLB_DIRTY;
+ if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
- }
}
/*
 * Invalidate a shadow-TLB page reference.
 *
 * Emits a tracepoint for a still-valid ref before clearing its flag
 * bits; a ref that is already invalid is left untouched.
 */
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (!(ref->flags & E500_TLB_VALID))
		return;

	trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
	ref->flags = 0;
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
+ trace_kvm_unmap_hva(hva);
+
/*
* Flush all shadow tlb entries everywhere. This is slow, but
* we are 100% sure that we catch the to be unmapped page
if (!vcpu_e500->gtlb_priv[1])
goto err;
- vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
+ vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
vcpu_e500->gtlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->g2h_tlb1_map)