 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-static void req_bio_endio(struct request *rq, struct bio *bio,
-                         unsigned int nbytes, blk_status_t error)
-{
-       if (unlikely(error)) {
-               bio->bi_status = error;
-       } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
-               /*
-                * Partial zone append completions cannot be supported as the
-                * BIO fragments may end up not being written sequentially.
-                */
-               if (bio->bi_iter.bi_size != nbytes)
-                       bio->bi_status = BLK_STS_IOERR;
-               else
-                       bio->bi_iter.bi_sector = rq->__sector;
-       }
-
-       bio_advance(bio, nbytes);
-
-       if (unlikely(rq->rq_flags & RQF_QUIET))
-               bio_set_flag(bio, BIO_QUIET);
-       /* don't actually finish bio if it's part of flush sequence */
-       if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
-               bio_endio(bio);
-}
-
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
        if (req->part && blk_do_io_stat(req)) {
 bool blk_update_request(struct request *req, blk_status_t error,
                unsigned int nr_bytes)
 {
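+       /*
+        * Cache the request flags that are tested for every bio completed in
+        * the loop below, so that req->rq_flags does not need to be re-read
+        * on each iteration.
+        */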
+       bool is_flush = req->rq_flags & RQF_FLUSH_SEQ;
+       bool quiet = req->rq_flags & RQF_QUIET;
        int total_bytes;
 
        trace_block_rq_complete(req, error, nr_bytes);
        if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
                __blk_crypto_rq_put_keyslot(req);
 
-       if (unlikely(error && !blk_rq_is_passthrough(req) &&
-                    !(req->rq_flags & RQF_QUIET)) &&
-                    !test_bit(GD_DEAD, &req->q->disk->state)) {
+       if (unlikely(error && !blk_rq_is_passthrough(req) && !quiet) &&
+           !test_bit(GD_DEAD, &req->q->disk->state)) {
                blk_print_req_error(req, error);
                trace_block_rq_error(req, error, nr_bytes);
        }
 
        blk_account_io_completion(req, nr_bytes);
 
        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_iter.bi_size)
+               if (unlikely(error))
+                       bio->bi_status = error;
+
+               if (bio_bytes == bio->bi_iter.bi_size) {
                        req->bio = bio->bi_next;
+               } else if (req_op(req) == REQ_OP_ZONE_APPEND &&
+                          error == BLK_STS_OK) {
+                       /*
+                        * Partial zone append completions cannot be supported
+                        * as the BIO fragments may end up not being written
+                        * sequentially.
+                        */
+                       bio->bi_status = BLK_STS_IOERR;
+               }
 
                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
-               req_bio_endio(req, bio, bio_bytes, error);
+               if (unlikely(quiet))
+                       bio_set_flag(bio, BIO_QUIET);
+
+               bio_advance(bio, bio_bytes);
+
+               /* Don't actually finish bio if it's part of flush sequence */
+               if (!bio->bi_iter.bi_size && !is_flush) {
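+                       /*
+                        * Zone append operations report the actual write
+                        * position back to the issuer through the BIO sector,
+                        * copied here from the request before completion.
+                        */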
+                       if (req_op(req) == REQ_OP_ZONE_APPEND)
+                               bio->bi_iter.bi_sector = req->__sector;
+                       bio_endio(bio);
+               }
 
                total_bytes += bio_bytes;
                nr_bytes -= bio_bytes;