From: NeilBrown <neilb@suse.com>
Date: Sat, 5 Sep 2015 09:07:04 +0000 (+0200)
Subject: Merge linux-block/for-4.3/core into md/for-linux
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=e89c6fdf9e0eb1b5a03574d4ca73e83eae8deb91;p=linux.git

Merge linux-block/for-4.3/core into md/for-linux

There were a few conflicts that were fairly easy to resolve.
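
The conflicts are between the block-layer changes in for-4.3/core
(bio_endio() losing its error argument, the BIO_UPTODATE flag being
replaced by a bi_error field in struct bio, and the removal of the
bvec merge callbacks such as raid5_mergeable_bvec) and the md-side
rework that, among other things, switched raid5's return_bi from an
open-coded bi_next chain to a struct bio_list.

A rough sketch of the completion-path change behind most of the hunks
below, assuming the 4.3-era block-layer API; the fragment is
illustrative only and not part of this merge:

	/* Before for-4.3/core: failure is signalled through the
	 * BIO_UPTODATE flag (or the error argument to bio_endio()). */
	clear_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);	/* old bio_endio() maps this to -EIO */

	/* After: the error is carried in the bio itself and bio_endio()
	 * takes no error argument. */
	bio->bi_error = -EIO;
	bio_endio(bio);

On top of that, the raid5 hunks now collect failed bios with
bio_list_add() and complete them in return_io() instead of threading
them onto *return_bi via bi_next.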

Signed-off-by: NeilBrown <neilb@suse.com>
---

e89c6fdf9e0eb1b5a03574d4ca73e83eae8deb91
diff --cc drivers/md/raid0.c
index 4a13c3cb940b8,59cda501a224a..63e619b2f44eb
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@@ -203,9 -188,10 +203,6 @@@ static int create_strip_zones(struct md
  		}
  		dev[j] = rdev1;
  
- 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
- 			conf->has_merge_bvec = 1;
 -		if (mddev->queue)
 -			disk_stack_limits(mddev->gendisk, rdev1->bdev,
 -					  rdev1->data_offset << 9);
--
  		if (!smallest || (rdev1->sectors < smallest->sectors))
  			smallest = rdev1;
  		cnt++;
diff --cc drivers/md/raid10.c
index a14c304aa751c,b0fce2ebf7ad2..0fc33eb888551
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@@ -2750,9 -2633,7 +2635,8 @@@ static void handle_write_completed(stru
  					r10_bio->devs[m].addr,
  					r10_bio->sectors, 0);
  				rdev_dec_pending(rdev, conf->mddev);
- 			} else if (bio != NULL &&
- 				   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ 			} else if (bio != NULL && bio->bi_error) {
 +				fail = true;
  				if (!narrow_write_error(r10_bio, m)) {
  					md_error(conf->mddev, rdev);
  					set_bit(R10BIO_Degraded,
diff --cc drivers/md/raid5.c
index 4195064460d0e,b29e89cb815b9..15ef2c641b2b9
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@@ -230,7 -233,8 +230,7 @@@ static void return_io(struct bio_list *
  		bi->bi_iter.bi_size = 0;
  		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
  					 bi, 0);
- 		bio_endio(bi, 0);
+ 		bio_endio(bi);
 -		bi = return_bi;
  	}
  }
  
@@@ -3107,10 -3110,12 +3105,11 @@@ handle_failed_stripe(struct r5conf *con
  		while (bi && bi->bi_iter.bi_sector <
  			sh->dev[i].sector + STRIPE_SECTORS) {
  			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ 
+ 			bi->bi_error = -EIO;
  			if (!raid5_dec_bi_active_stripes(bi)) {
  				md_write_end(conf->mddev);
 -				bi->bi_next = *return_bi;
 -				*return_bi = bi;
 +				bio_list_add(return_bi, bi);
  			}
  			bi = nextbi;
  		}
@@@ -3130,10 -3135,12 +3129,11 @@@
  		while (bi && bi->bi_iter.bi_sector <
  		       sh->dev[i].sector + STRIPE_SECTORS) {
  			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ 
+ 			bi->bi_error = -EIO;
  			if (!raid5_dec_bi_active_stripes(bi)) {
  				md_write_end(conf->mddev);
 -				bi->bi_next = *return_bi;
 -				*return_bi = bi;
 +				bio_list_add(return_bi, bi);
  			}
  			bi = bi2;
  		}
@@@ -3154,9 -3161,12 +3154,10 @@@
  			       sh->dev[i].sector + STRIPE_SECTORS) {
  				struct bio *nextbi =
  					r5_next_bio(bi, sh->dev[i].sector);
- 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ 
+ 				bi->bi_error = -EIO;
 -				if (!raid5_dec_bi_active_stripes(bi)) {
 -					bi->bi_next = *return_bi;
 -					*return_bi = bi;
 -				}
 +				if (!raid5_dec_bi_active_stripes(bi))
 +					bio_list_add(return_bi, bi);
  				bi = nextbi;
  			}
  		}
@@@ -4667,43 -4670,14 +4668,14 @@@ static int raid5_congested(struct mdde
  	return 0;
  }
  
- /* We want read requests to align with chunks where possible,
-  * but write requests don't need to.
-  */
- static int raid5_mergeable_bvec(struct mddev *mddev,
- 				struct bvec_merge_data *bvm,
- 				struct bio_vec *biovec)
- {
- 	struct r5conf *conf = mddev->private;
- 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- 	int max;
- 	unsigned int chunk_sectors;
- 	unsigned int bio_sectors = bvm->bi_size >> 9;
- 
- 	/*
- 	 * always allow writes to be mergeable, read as well if array
- 	 * is degraded as we'll go through stripe cache anyway.
- 	 */
- 	if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
- 		return biovec->bv_len;
- 
- 	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
- 	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
- 	if (max < 0) max = 0;
- 	if (max <= biovec->bv_len && bio_sectors == 0)
- 		return biovec->bv_len;
- 	else
- 		return max;
- }
- 
  static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
  {
 +	struct r5conf *conf = mddev->private;
  	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 -	unsigned int chunk_sectors = mddev->chunk_sectors;
 +	unsigned int chunk_sectors;
  	unsigned int bio_sectors = bio_sectors(bio);
  
 -	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 -		chunk_sectors = mddev->new_chunk_sectors;
 +	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
  	return  chunk_sectors >=
  		((sector & (chunk_sectors - 1)) + bio_sectors);
  }