Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 81def6b9 authored by Joern Engel
Browse files

Simplify and fix pad_wbuf



A comment in the old code read:
        /* The math in this function can surely use some love */

And indeed it did.  In the case that area->a_used_bytes is exactly
4096 bytes below segment size it fell apart.  pad_wbuf is now split
into two helpers that are significantly less complicated.

Signed-off-by: Joern Engel <joern@logfs.org>
parent 19321917
Loading
Loading
Loading
Loading
+30 −22
Original line number Diff line number Diff line
@@ -93,47 +93,55 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
	} while (len);
}

/*
 * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
 */
static void pad_wbuf(struct logfs_area *area, int final)
static void pad_partial_page(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
	pgoff_t index = ofs >> PAGE_SHIFT;
	long offset = ofs & (PAGE_SIZE-1);
	u32 len = PAGE_SIZE - offset;

	if (len == PAGE_SIZE) {
		/* The math in this function can surely use some love */
		len = 0;
	}
	if (len) {
		BUG_ON(area->a_used_bytes >= super->s_segsize);

		page = get_mapping_page(area->a_sb, index, 0);
	if (len % PAGE_SIZE) {
		page = get_mapping_page(sb, index, 0);
		BUG_ON(!page); /* FIXME: reserve a pool */
		memset(page_address(page) + offset, 0xff, len);
		SetPagePrivate(page);
		page_cache_release(page);
	}
}

	if (!final)
		return;
/*
 * Allocate and memset every remaining full page up to the end of the
 * segment, so the final writeout covers the whole segment without
 * leaking uninitialized data.
 */
static void pad_full_pages(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct logfs_super *super = logfs_super(sb);
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
	u32 len = super->s_segsize - area->a_used_bytes;
	/* First page boundary at or after the current write offset. */
	pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
	/* Number of whole pages between there and the segment end. */
	pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
	struct page *page;

	/* Consume the rest of the segment up front; the loop below only
	 * walks page indices and does not touch a_used_bytes. */
	area->a_used_bytes += len;
	while (no_indizes) {
		page = get_mapping_page(sb, index, 0);
		BUG_ON(!page); /* FIXME: reserve a pool */
		SetPageUptodate(page);
		memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
		SetPagePrivate(page);
		page_cache_release(page);
		index++;
		no_indizes--;
	}
}

/*
 * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
 * Also make sure we allocate (and memset) all pages for final writeout.
 */
static void pad_wbuf(struct logfs_area *area, int final)
{
	/* Always pad out the partially used last page. */
	pad_partial_page(area);
	if (!final)
		return;
	/* On final writeout, also pad the remaining full pages. */
	pad_full_pages(area);
}

/*