
Commit d0957105 authored by Linus Torvalds

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md:
  md: Update documentation for sync_min and sync_max entries
  md: Cleanup after raid45->raid0 takeover
  md: Fix dev_sectors on takeover from raid0 to raid4/5
  md/raid5: remove setting of ->queue_lock
parents 73aa8682 28a83978
Documentation/md.txt +10 −0
@@ -552,6 +552,16 @@ also have
     within the array where IO will be blocked.  This is currently
     only supported for raid4/5/6.
 
+  sync_min
+  sync_max
+     The two values, given as numbers of sectors, indicate a range
+     within the array where 'check'/'repair' will operate. Must be
+     a multiple of chunk_size. When it reaches "sync_max" it will
+     pause, rather than complete.
+     You can use 'select' or 'poll' on "sync_completed" to wait for
+     that number to reach sync_max.  Then you can either increase
+     "sync_max", or can write 'idle' to "sync_action".
+
 
 Each active md device may also have attributes specific to the
 personality module that manages it.
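
The workflow in the new paragraph maps onto a handful of sysfs reads and writes. Below is a minimal userspace sketch, not part of the commit: it assumes an array named md0 (so the attributes live under /sys/block/md0/md/) and an upper bound of 8192 sectors, which must be a multiple of the chunk size.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		if (write(fd, val, strlen(val)) < 0)
			perror(path);
		close(fd);
	}
}

int main(void)
{
	char buf[64];
	fd_set ex;
	int fd;

	/* Restrict the scrub to sectors [0, 8192) and kick it off. */
	write_str("/sys/block/md0/md/sync_min", "0");
	write_str("/sys/block/md0/md/sync_max", "8192");
	write_str("/sys/block/md0/md/sync_action", "check");

	fd = open("/sys/block/md0/md/sync_completed", O_RDONLY);
	if (fd < 0)
		return 1;

	for (;;) {
		unsigned long long done = 0;
		ssize_t n;

		/* sysfs poll pattern: re-read from offset 0 first,
		 * then wait; a change is reported as an exceptional
		 * condition on the fd. */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			sscanf(buf, "%llu", &done); /* "cur / max" or "none" */
		}
		if (done >= 8192)
			break;

		FD_ZERO(&ex);
		FD_SET(fd, &ex);
		select(fd + 1, NULL, NULL, &ex, NULL);
	}
	close(fd);

	/* The scrub is now paused at sync_max: either raise
	 * "sync_max" to continue, or stop it altogether. */
	write_str("/sys/block/md0/md/sync_action", "idle");
	return 0;
}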
drivers/md/md.c +1 −0
@@ -3170,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
 	mddev->layout = mddev->new_layout;
 	mddev->chunk_sectors = mddev->new_chunk_sectors;
 	mddev->delta_disks = 0;
+	mddev->degraded = 0;
 	if (mddev->pers->sync_request == NULL) {
 		/* this is now an array without redundancy, so
 		 * it must always be in_sync
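
An aside on this one-line change: md exposes the degraded count through sysfs, so the effect of the new mddev->degraded = 0 is observable from userspace once the array has been taken over to a personality without redundancy. A small sketch, where the device name md0 is an illustrative assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = "";
	/* After a takeover to raid0 (no redundancy) this attribute
	 * should report 0. */
	int fd = open("/sys/block/md0/md/degraded", O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) < 0)
		buf[0] = '\0';
	close(fd);
	printf("degraded: %s", buf);
	return 0;
}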
drivers/md/raid5.c +4 −1
@@ -5151,7 +5151,6 @@ static int run(mddev_t *mddev)
 
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-		mddev->queue->queue_lock = &conf->device_lock;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
@@ -5679,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 {
 	struct raid0_private_data *raid0_priv = mddev->private;
+	unsigned long long sectors;
 
 	/* for raid0 takeover only one zone is supported */
 	if (raid0_priv->nr_strip_zones > 1) {
@@ -5687,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 		return ERR_PTR(-EINVAL);
 	}
 
+	sectors = raid0_priv->strip_zone[0].zone_end;
+	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+	mddev->dev_sectors = sectors;
 	mddev->new_level = level;
 	mddev->new_layout = ALGORITHM_PARITY_N;
 	mddev->new_chunk_sectors = mddev->chunk_sectors;
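
The dev_sectors change above is straightforward arithmetic: with only one strip zone, zone_end is the zone's size summed over all member devices, so dividing by nb_dev recovers the per-device size the raid4/5 personality expects. A userspace sketch with made-up numbers, using plain 64-bit division in place of the kernel's sector_div() (which divides its first argument in place and returns the remainder):

#include <stdio.h>

int main(void)
{
	/* Hypothetical single-zone raid0: 4 member disks, zone_end
	 * covering the whole array. */
	unsigned long long zone_end = 1953525168ULL;
	unsigned int nb_dev = 4;
	unsigned long long sectors = zone_end;

	sectors /= nb_dev;	/* kernel: sector_div(sectors, nb_dev); */

	/* Per-device size the takeover now stores in dev_sectors:
	 * 1953525168 / 4 = 488381292 sectors. */
	printf("dev_sectors = %llu\n", sectors);
	return 0;
}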