
Commit a2f6d9c4 authored by Theodore Ts'o

Merge branch 'dax-4.10-iomap-pmd' into origin

parents bc33b0ca 9484ab1b
+0 −1
@@ -55,7 +55,6 @@ config FS_DAX_PMD
 	depends on FS_DAX
 	depends on ZONE_DEVICE
 	depends on TRANSPARENT_HUGEPAGE
-	depends on BROKEN
 
 endif # BLOCK
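
With "depends on BROKEN" gone, FS_DAX_PMD becomes selectable again whenever FS_DAX, ZONE_DEVICE and TRANSPARENT_HUGEPAGE are enabled. As a minimal sketch (the guard below follows the usual kernel convention and is not taken from this diff; the function name is hypothetical), code behind such an option is typically compiled in conditionally and stubbed out otherwise so huge-page faults simply fall back to 4 KiB faults:

#ifdef CONFIG_FS_DAX_PMD
int example_dax_pmd_path(void);		/* real 2 MiB (PMD) fault handling */
#else
static inline int example_dax_pmd_path(void)
{
	return VM_FAULT_FALLBACK;	/* retry the fault with 4 KiB PTEs */
}
#endif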


+464 −364
File changed; preview size limit exceeded, changes collapsed.
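
The fs/dax.c changes themselves are collapsed above. As a rough, hypothetical sketch of the pattern they enable (the dax_iomap_pmd_fault() name and signature, and example_iomap_ops, are assumptions rather than lines from the collapsed diff), a filesystem that can hand out huge-page-aligned extents would wire its .pmd_fault handler up to the new iomap-based helper, whereas ext2 below removes its handler instead:

static int example_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd, unsigned int flags)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}
	/* assumed helper provided by the collapsed fs/dax.c changes */
	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &example_iomap_ops);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

When the backing extent cannot be mapped with a 2 MiB page, such a handler would return VM_FAULT_FALLBACK so the VM retries with a regular 4 KiB fault.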

+9 −26
@@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		return 0; /* skip atime */
 
 	inode_lock_shared(inode);
-	ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
 	inode_unlock_shared(inode);
 
 	file_accessed(iocb->ki_filp);
@@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (ret)
 		goto out_unlock;
 
-	ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		mark_inode_dirty(inode);
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	down_read(&ei->dax_sem);
 
-	ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
+	ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
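
The three ext2 hunks above are a mechanical rename: the iomap-based DAX entry points move from an iomap_dax_* prefix to dax_iomap_*. For reference, the renamed interface ext2 now calls looks approximately like this (a sketch of the 4.10-era declarations, not lines taken from this diff):

/* normally declared in include/linux/dax.h */
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		     struct iomap_ops *ops);
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		    struct iomap_ops *ops);
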
@@ -107,27 +107,6 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return ret;
 }
 
-static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-						pmd_t *pmd, unsigned int flags)
-{
-	struct inode *inode = file_inode(vma->vm_file);
-	struct ext2_inode_info *ei = EXT2_I(inode);
-	int ret;
-
-	if (flags & FAULT_FLAG_WRITE) {
-		sb_start_pagefault(inode->i_sb);
-		file_update_time(vma->vm_file);
-	}
-	down_read(&ei->dax_sem);
-
-	ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
-
-	up_read(&ei->dax_sem);
-	if (flags & FAULT_FLAG_WRITE)
-		sb_end_pagefault(inode->i_sb);
-	return ret;
-}
-
 static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
 		struct vm_fault *vmf)
 {
@@ -154,7 +133,11 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
 
 static const struct vm_operations_struct ext2_dax_vm_ops = {
 	.fault		= ext2_dax_fault,
-	.pmd_fault	= ext2_dax_pmd_fault,
+	/*
+	 * .pmd_fault is not supported for DAX because allocation in ext2
+	 * cannot be reliably aligned to huge page sizes and so pmd faults
+	 * will always fail and fail back to regular faults.
+	 */
 	.page_mkwrite	= ext2_dax_fault,
 	.pfn_mkwrite	= ext2_dax_pfn_mkwrite,
 };
@@ -166,7 +149,7 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	file_accessed(file);
 	vma->vm_ops = &ext2_dax_vm_ops;
-	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+	vma->vm_flags |= VM_MIXEDMAP;
 	return 0;
 }
 #else
+3 −0
@@ -767,6 +767,9 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
 		ext4_update_bh_state(bh, map.m_flags);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 		ret = 0;
+	} else if (ret == 0) {
+		/* hole case, need to fill in bh->b_size */
+		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 	}
 	return ret;
 }
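
The new "else if (ret == 0)" branch covers the hole case: ext4_map_blocks() returns 0 when no mapping exists, and previously bh->b_size was left untouched, so a caller probing a hole could not tell how far the hole extends. A small worked example with assumed values:

/* hypothetical numbers, just to illustrate the hole case above */
unsigned int blocksize = 4096;		/* inode->i_sb->s_blocksize */
unsigned int hole_blocks = 8;		/* map.m_len returned for the hole */
size_t b_size = (size_t)blocksize * hole_blocks;	/* 32768 bytes of hole */

Reporting the extent of the hole through bh->b_size lets the caller handle the whole 32 KiB region in one step instead of re-probing it block by block.
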
+3 −2
@@ -467,8 +467,9 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	offset = page_offset(page);
 	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
-				ops, page, iomap_page_mkwrite_actor);
+		ret = iomap_apply(inode, offset, length,
+				IOMAP_WRITE | IOMAP_FAULT, ops, page,
+				iomap_page_mkwrite_actor);
 		if (unlikely(ret <= 0))
 			goto out_unlock;
 		offset += ret;
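
iomap_page_mkwrite() now passes IOMAP_FAULT alongside IOMAP_WRITE, so an ->iomap_begin implementation can tell that the mapping request comes from the page-fault path rather than an ordinary write. A hedged sketch of how a filesystem might use the flag (the function name and surrounding logic are hypothetical):

static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			       unsigned flags, struct iomap *iomap)
{
	if (flags & IOMAP_FAULT) {
		/*
		 * Called from a fault handler (e.g. page_mkwrite): the
		 * faulting page is already locked, so adjust locking and
		 * allocation behaviour accordingly.
		 */
	}

	/* ... look up or allocate blocks for pos/length and fill in *iomap ... */
	return 0;
}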