diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4594,7 +4594,11 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
	 * because vm_next and vm_prev must be safe. This can't be guaranteed
	 * in the speculative path.
	 */
-	if (unlikely(vma_is_anonymous(vmf.vma) && !vmf.vma->anon_vma)) {
+	if (unlikely((vma_is_anonymous(vmf.vma) && !vmf.vma->anon_vma) ||
+		     (!vma_is_anonymous(vmf.vma) &&
+		      !(vmf.vma->vm_flags & VM_SHARED) &&
+		      (vmf.flags & FAULT_FLAG_WRITE) &&
+		      !vmf.vma->anon_vma))) {
		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
		return VM_FAULT_RETRY;
	}