Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a4c20b9a authored by Linus Torvalds
Browse files
Pull percpu updates from Tejun Heo:
 "These are the percpu changes for the v4.13-rc1 merge window. There are
  a couple visibility related changes - tracepoints and allocator stats
  through debugfs, along with __ro_after_init markings and a cosmetic
  rename in percpu_counter.

  Please note that the simple O(#elements_in_the_chunk) area allocator
  used by percpu allocator is again showing scalability issues,
  primarily with bpf allocating and freeing large number of counters.
  Dennis is working on the replacement allocator and the percpu
  allocator will be seeing increased churns in the coming cycles"

* 'for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: fix static checker warnings in pcpu_destroy_chunk
  percpu: fix early calls for spinlock in pcpu_stats
  percpu: resolve err may not be initialized in pcpu_alloc
  percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch
  percpu: add tracepoint support for percpu memory
  percpu: expose statistics about percpu memory via debugfs
  percpu: migrate percpu data structures to internal header
  percpu: add missing lockdep_assert_held to func pcpu_free_area
  mark most percpu globals as __ro_after_init
parents 9b51f044 e3efe3db
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -1256,7 +1256,7 @@ void clean_tree_block(struct btrfs_fs_info *fs_info,
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
@@ -4047,7 +4047,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
			buf->start, transid, fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+3 −3
Original line number Diff line number Diff line
@@ -3577,7 +3577,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
		spin_unlock(&eb->refs_lock);
		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 -eb->len,
					 fs_info->dirty_metadata_batch);
		ret = 1;
+4 −4
Original line number Diff line number Diff line
@@ -1763,7 +1763,7 @@ static void btrfs_set_bit_hook(void *private_data,
		if (btrfs_is_testing(fs_info))
			return;

		__percpu_counter_add(&fs_info->delalloc_bytes, len,
		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
@@ -1838,7 +1838,7 @@ static void btrfs_clear_bit_hook(void *private_data,
					&inode->vfs_inode,
					state->start, len);

		__percpu_counter_add(&fs_info->delalloc_bytes, -len,
		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
+2 −2
Original line number Diff line number Diff line
@@ -1211,7 +1211,7 @@ xfs_mod_icount(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	__percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
	percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
		ASSERT(0);
		percpu_counter_add(&mp->m_icount, -delta);
@@ -1290,7 +1290,7 @@ xfs_mod_fdblocks(
	else
		batch = XFS_FDBLOCKS_BATCH;

	__percpu_counter_add(&mp->m_fdblocks, delta, batch);
	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
+1 −1
Original line number Diff line number Diff line
@@ -66,7 +66,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
Loading