There are several functions which pass an access permission mask for SPTEs as an unsigned. This works, but checkpatch complains about it. Switch the occurrences of unsigned to unsigned int to satisfy checkpatch.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2358
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Oliver Upton oupton@google.com
---
 arch/x86/kvm/mmu/mmu.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 84eeb61d06aa3..a9c593dec49bf 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -452,7 +452,7 @@ static u64 get_mmio_spte_generation(u64 spte)
 }
 
 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-                           unsigned access)
+                           unsigned int access)
 {
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 mask = generation_mmio_spte_mask(gen);
@@ -484,7 +484,7 @@ static unsigned get_mmio_spte_access(u64 spte)
 }
 
 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-                          kvm_pfn_t pfn, unsigned access)
+                          kvm_pfn_t pfn, unsigned int access)
 {
        if (unlikely(is_noslot_pfn(pfn))) {
                mark_mmio_spte(vcpu, sptep, gfn, access);
@@ -2475,7 +2475,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int direct,
-                                            unsigned access)
+                                            unsigned int access)
 {
        union kvm_mmu_page_role role;
        unsigned quadrant;
@@ -2990,7 +2990,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH  BIT(1)
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                   unsigned pte_access, int level,
+                   unsigned int pte_access, int level,
                    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
                    bool can_unsync, bool host_writable)
 {
@@ -3081,9 +3081,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        return ret;
 }
 
-static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
-                       int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
-                       bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+                       unsigned int pte_access, int write_fault, int level,
+                       gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+                       bool host_writable)
 {
        int was_rmapped = 0;
        int rmap_count;
@@ -3165,7 +3166,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 {
        struct page *pages[PTE_PREFETCH_NUM];
        struct kvm_memory_slot *slot;
-       unsigned access = sp->role.access;
+       unsigned int access = sp->role.access;
        int i, ret;
        gfn_t gfn;
 
@@ -3400,7 +3401,8 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 }
 
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
-                               kvm_pfn_t pfn, unsigned access, int *ret_val)
+                               kvm_pfn_t pfn, unsigned int access,
+                               int *ret_val)
 {
        /* The pfn is invalid, report the error! */
        if (unlikely(is_error_pfn(pfn))) {
@@ -4005,7 +4007,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
        if (is_mmio_spte(spte)) {
                gfn_t gfn = get_mmio_spte_gfn(spte);
-               unsigned access = get_mmio_spte_access(spte);
+               unsigned int access = get_mmio_spte_access(spte);
 
                if (!check_mmio_spte(vcpu, spte))
                        return RET_PF_INVALID;
@@ -4349,7 +4351,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 }
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-                          unsigned access, int *nr_present)
+                          unsigned int access, int *nr_present)
 {
        if (unlikely(is_mmio_spte(*sptep))) {
                if (gfn != get_mmio_spte_gfn(*sptep)) {
Separate the functions for generating MMIO page table entries from the function that inserts them into the paging structure. This refactoring will facilitate changes to the MMU synchronization model to use atomic compare / exchanges (which are not guaranteed to succeed) instead of a monolithic MMU lock.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2359
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Oliver Upton oupton@google.com
Reviewed-by: Peter Shier pshier@google.com
---
 arch/x86/kvm/mmu/mmu.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a9c593dec49bf..b81010d0edae1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -451,9 +451,9 @@ static u64 get_mmio_spte_generation(u64 spte)
        return gen;
 }
 
-static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-                           unsigned int access)
+static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 {
+
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 mask = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;
@@ -464,6 +464,17 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
        mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << shadow_nonpresent_or_rsvd_mask_len;
 
+       return mask;
+}
+
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+                           unsigned int access)
+{
+       u64 mask = make_mmio_spte(vcpu, gfn, access);
+       unsigned int gen = get_mmio_spte_generation(mask);
+
+       access = mask & ACC_ALL;
+
        trace_mark_mmio_spte(sptep, gfn, access, gen);
        mmu_spte_set(sptep, mask);
 }
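(For illustration only: a rough sketch, not code from this series, of why a standalone make_mmio_spte() matters once SPTEs are installed with atomic compare/exchanges that may fail. The value can be generated up front and the installation retried; the helper name and retry policy below are hypothetical.)

static bool try_mark_mmio_spte_atomic(struct kvm_vcpu *vcpu, u64 *sptep,
                                      u64 gfn, unsigned int access)
{
        u64 old_spte = READ_ONCE(*sptep);
        u64 new_spte = make_mmio_spte(vcpu, gfn, access);

        /* Fails, and can be retried by the caller, if the SPTE changed under us. */
        return cmpxchg64(sptep, old_spte, new_spte) == old_spte;
}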
Ben Gardon bgardon@google.com writes:
Separate the functions for generating MMIO page table entries from the function that inserts them into the paging structure. This refactoring will facilitate changes to the MMU synchronization model to use atomic compare / exchanges (which are not guaranteed to succeed) instead of a monolithic MMU lock.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2359
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Oliver Upton oupton@google.com
Reviewed-by: Peter Shier pshier@google.com
 arch/x86/kvm/mmu/mmu.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a9c593dec49bf..b81010d0edae1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -451,9 +451,9 @@ static u64 get_mmio_spte_generation(u64 spte)
        return gen;
 }
 
-static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-                           unsigned int access)
+static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 {
+
Unneeded newline.
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 mask = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;
@@ -464,6 +464,17 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
        mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << shadow_nonpresent_or_rsvd_mask_len;
 
+       return mask;
+}
+
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+                           unsigned int access)
+{
+       u64 mask = make_mmio_spte(vcpu, gfn, access);
+       unsigned int gen = get_mmio_spte_generation(mask);
+
+       access = mask & ACC_ALL;
+
        trace_mark_mmio_spte(sptep, gfn, access, gen);
'access' and 'gen' are only being used for tracing, would it rather make sense to rename&move it to the newly introduced make_mmio_spte()? Or do we actually need tracing for both?
Also, I dislike re-purposing function parameters.
        mmu_spte_set(sptep, mask);
 }
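(For illustration: one way to avoid re-writing the 'access' parameter, essentially the one-liner Paolo suggests in his reply below. This is not code from the posted patch.)

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
                           unsigned int access)
{
        u64 mask = make_mmio_spte(vcpu, gfn, access);
        unsigned int gen = get_mmio_spte_generation(mask);

        /* Trace the access bits taken from the mask instead of reusing 'access'. */
        trace_mark_mmio_spte(sptep, gfn, mask & ACC_ALL, gen);
        mmu_spte_set(sptep, mask);
}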
On 05/02/20 14:37, Vitaly Kuznetsov wrote:
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+                           unsigned int access)
+{
+       u64 mask = make_mmio_spte(vcpu, gfn, access);
+       unsigned int gen = get_mmio_spte_generation(mask);
+
+       access = mask & ACC_ALL;
+
        trace_mark_mmio_spte(sptep, gfn, access, gen);
'access' and 'gen' are only being used for tracing, would it rather make sense to rename&move it to the newly introduced make_mmio_spte()? Or do we actually need tracing for both?
You would have the same issue with sptep.
Also, I dislike re-purposing function parameters.
Yes, "trace_mark_mmio_spte(sptep, gfn, mask & ACC_ALL, gen);" is slightly better.
Paolo
Separate the functions for generating leaf page table entries from the function that inserts them into the paging structure. This refactoring will facilitate changes to the MMU synchronization model to use atomic compare / exchanges (which are not guaranteed to succeed) instead of a monolithic MMU lock.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2360
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Peter Shier pshier@google.com
---
 arch/x86/kvm/mmu/mmu.c | 52 +++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b81010d0edae1..9239ad5265dc6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3000,20 +3000,14 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_WRITE_PROTECTED_PT     BIT(0)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH  BIT(1)
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                   unsigned int pte_access, int level,
-                   gfn_t gfn, kvm_pfn_t pfn, bool speculative,
-                   bool can_unsync, bool host_writable)
+static u64 make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+                    gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
+                    bool can_unsync, bool host_writable, bool ad_disabled,
+                    int *ret)
 {
        u64 spte = 0;
-       int ret = 0;
-       struct kvm_mmu_page *sp;
-
-       if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
-               return 0;
 
-       sp = page_header(__pa(sptep));
-       if (sp_ad_disabled(sp))
+       if (ad_disabled)
                spte |= SPTE_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_AD_WRPROT_ONLY_MASK;
@@ -3066,27 +3060,49 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 * is responsibility of mmu_get_page / kvm_sync_page.
                 * Same reasoning can be applied to dirty page accounting.
                 */
-               if (!can_unsync && is_writable_pte(*sptep))
-                       goto set_pte;
+               if (!can_unsync && is_writable_pte(old_spte))
+                       return spte;
 
                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
-                       ret |= SET_SPTE_WRITE_PROTECTED_PT;
+                       *ret |= SET_SPTE_WRITE_PROTECTED_PT;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
                }
        }
 
-       if (pte_access & ACC_WRITE_MASK) {
-               kvm_vcpu_mark_page_dirty(vcpu, gfn);
+       if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);
-       }
 
        if (speculative)
                spte = mark_spte_for_access_track(spte);
 
-set_pte:
+       return spte;
+}
+
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+                   unsigned int pte_access, int level,
+                   gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+                   bool can_unsync, bool host_writable)
+{
+       u64 spte = 0;
+       struct kvm_mmu_page *sp;
+       int ret = 0;
+
+       if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
+               return 0;
+
+       sp = page_header(__pa(sptep));
+
+       spte = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+                        can_unsync, host_writable, sp_ad_disabled(sp), &ret);
+       if (!spte)
+               return 0;
+
+       if (spte & PT_WRITABLE_MASK)
+               kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
        if (mmu_spte_update(sptep, spte))
                ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
        return ret;
Ben Gardon bgardon@google.com writes:
Separate the functions for generating leaf page table entries from the function that inserts them into the paging structure. This refactoring will facilitate changes to the MMU synchronization model to use atomic compare / exchanges (which are not guaranteed to succeed) instead of a monolithic MMU lock.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2360
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Peter Shier pshier@google.com
 arch/x86/kvm/mmu/mmu.c | 52 +++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b81010d0edae1..9239ad5265dc6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3000,20 +3000,14 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_WRITE_PROTECTED_PT     BIT(0)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH  BIT(1)
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                   unsigned int pte_access, int level,
-                   gfn_t gfn, kvm_pfn_t pfn, bool speculative,
-                   bool can_unsync, bool host_writable)
+static u64 make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+                    gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
+                    bool can_unsync, bool host_writable, bool ad_disabled,
+                    int *ret)
With such a long parameter list we may think about passing a pointer to a structure instead (common for make_spte()/set_spte())
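(For illustration, one hypothetical shape such a parameter structure could take; this is not something posted in the series.)

struct make_spte_params {
        unsigned int    pte_access;
        int             level;
        gfn_t           gfn;
        kvm_pfn_t       pfn;
        u64             old_spte;
        bool            speculative;
        bool            can_unsync;
        bool            host_writable;
        bool            ad_disabled;
};

static u64 make_spte(struct kvm_vcpu *vcpu, const struct make_spte_params *p,
                     int *ret);

set_spte() would fill the structure once and pass it down, so the argument list stops growing as new fields are needed.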
 {
        u64 spte = 0;
-       int ret = 0;
-       struct kvm_mmu_page *sp;
-
-       if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
-               return 0;
 
-       sp = page_header(__pa(sptep));
-       if (sp_ad_disabled(sp))
+       if (ad_disabled)
                spte |= SPTE_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_AD_WRPROT_ONLY_MASK;
@@ -3066,27 +3060,49 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 * is responsibility of mmu_get_page / kvm_sync_page.
                 * Same reasoning can be applied to dirty page accounting.
                 */
-               if (!can_unsync && is_writable_pte(*sptep))
-                       goto set_pte;
+               if (!can_unsync && is_writable_pte(old_spte))
+                       return spte;
 
                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
-                       ret |= SET_SPTE_WRITE_PROTECTED_PT;
+                       *ret |= SET_SPTE_WRITE_PROTECTED_PT;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
                }
        }
 
-       if (pte_access & ACC_WRITE_MASK) {
-               kvm_vcpu_mark_page_dirty(vcpu, gfn);
+       if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);
-       }
 
        if (speculative)
                spte = mark_spte_for_access_track(spte);
 
-set_pte:
+       return spte;
+}
+
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+                   unsigned int pte_access, int level,
+                   gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+                   bool can_unsync, bool host_writable)
+{
+       u64 spte = 0;
+       struct kvm_mmu_page *sp;
+       int ret = 0;
+
+       if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
+               return 0;
+
+       sp = page_header(__pa(sptep));
+
+       spte = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+                        can_unsync, host_writable, sp_ad_disabled(sp), &ret);
I'm probably missing something, but in make_spte() I see just one place which writes to '*ret' so at the end, this is either SET_SPTE_WRITE_PROTECTED_PT or 0 (which we got only because we initialize it to 0 in set_spte()). Unless this is preparation to some other change, I don't see much value in the complication.
Can we actually reverse the logic, pass 'spte' by reference and return 'ret'?
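(For illustration, the reversed convention suggested above; hypothetical, not code from the series: return the SET_SPTE_* flags and hand the generated SPTE back through an out parameter.)

static int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
                     gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
                     bool can_unsync, bool host_writable, bool ad_disabled,
                     u64 *new_spte);

The caller would then take the status flags from the return value, matching set_spte()'s existing convention, and read the generated SPTE from *new_spte.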
+       if (!spte)
+               return 0;
+
+       if (spte & PT_WRITABLE_MASK)
+               kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
        if (mmu_spte_update(sptep, spte))
                ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
        return ret;
On 05/02/20 14:52, Vitaly Kuznetsov wrote:
+       spte = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+                        can_unsync, host_writable, sp_ad_disabled(sp), &ret);
I'm probably missing something, but in make_spte() I see just one place which writes to '*ret' so at the end, this is either SET_SPTE_WRITE_PROTECTED_PT or 0 (which we got only because we initialize it to 0 in set_spte()). Unless this is preparation to some other change, I don't see much value in the complication.
Can we actually reverse the logic, pass 'spte' by reference and return 'ret'?
It gives a similar calling convention between make_spte and make_mmio_spte. It's not the most beautiful thing but I think I prefer it.
But the overwhelming function parameters are quite ugly, especially old_spte. I don't think it's an improvement; let's consider it together with the rest of your changes instead.
Paolo
Ben Gardon bgardon@google.com writes:
There are several functions which pass an access permission mask for SPTEs as an unsigned. This works, but checkpatch complains about it. Switch the occurrences of unsigned to unsigned int to satisfy checkpatch.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2358
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Oliver Upton oupton@google.com
 arch/x86/kvm/mmu/mmu.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
Reviewed-by: Vitaly Kuznetsov vkuznets@redhat.com
On Mon, Feb 03, 2020 at 03:09:09PM -0800, Ben Gardon wrote:
There are several functions which pass an access permission mask for SPTEs as an unsigned. This works, but checkpatch complains about it. Switch the occurrences of unsigned to unsigned int to satisfy checkpatch.
No functional change expected.
Tested by running kvm-unit-tests on an Intel Haswell machine. This commit introduced no new failures.
This commit can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2358
Signed-off-by: Ben Gardon bgardon@google.com
Reviewed-by: Oliver Upton oupton@google.com
Reviewed-by: Peter Xu peterx@redhat.com