};
 
 #ifdef CONFIG_CEPH_FSCACHE
-static void ceph_set_page_fscache(struct page *page)
-{
-       folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
-}
-
 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
 {
        struct inode *inode = priv;
                               ceph_fscache_write_terminated, inode, true, caching);
 }
 #else
-static inline void ceph_set_page_fscache(struct page *page)
-{
-}
-
 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 {
 }
                len = wlen;
 
        set_page_writeback(page);
-       if (caching)
-               ceph_set_page_fscache(page);
        ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
        if (IS_ENCRYPTED(inode)) {
                return AOP_WRITEPAGE_ACTIVATE;
        }
 
-       folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
-
        err = writepage_nounlock(page, wbc);
        if (err == -ERESTARTSYS) {
                /* direct memory reclaimer was killed by SIGKILL. return 0
                                unlock_page(page);
                                break;
                        }
-                       if (PageWriteback(page) ||
-                           PagePrivate2(page) /* [DEPRECATED] */) {
+                       if (PageWriteback(page)) {
                                if (wbc->sync_mode == WB_SYNC_NONE) {
                                        doutc(cl, "%p under writeback\n", page);
                                        unlock_page(page);
                                        continue;
                                }
                                doutc(cl, "waiting on writeback %p\n", page);
                                wait_on_page_writeback(page);
-                               folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
                        }
 
                        if (!clear_page_dirty_for_io(page)) {
                        }
 
                        set_page_writeback(page);
-                       if (caching)
-                               ceph_set_page_fscache(page);
                        len += thp_size(page);
                }
                ceph_fscache_write_to_cache(inode, offset, len, caching);
 
        netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
-/*
- * [DEPRECATED] Deal with the completion of writing the data to the cache.  We
- * have to clear the PG_fscache bits on the folios involved and release the
- * caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
-                                         bool was_async)
-{
-       struct netfs_io_subrequest *subreq;
-       struct folio *folio;
-       pgoff_t unlocked = 0;
-       bool have_unlocked = false;
-
-       rcu_read_lock();
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
-               xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
-                       if (xas_retry(&xas, folio))
-                               continue;
-
-                       /* We might have multiple writes from the same huge
-                        * folio, but we mustn't unlock a folio more than once.
-                        */
-                       if (have_unlocked && folio->index <= unlocked)
-                               continue;
-                       unlocked = folio_next_index(folio) - 1;
-                       trace_netfs_folio(folio, netfs_folio_trace_end_copy);
-                       folio_end_private_2(folio);
-                       have_unlocked = true;
-               }
-       }
-
-       rcu_read_unlock();
-       netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-                                      bool was_async) /* [DEPRECATED] */
-{
-       struct netfs_io_subrequest *subreq = priv;
-       struct netfs_io_request *rreq = subreq->rreq;
-
-       if (IS_ERR_VALUE(transferred_or_error)) {
-               netfs_stat(&netfs_n_rh_write_failed);
-               trace_netfs_failure(rreq, subreq, transferred_or_error,
-                                   netfs_fail_copy_to_cache);
-       } else {
-               netfs_stat(&netfs_n_rh_write_done);
-       }
-
-       trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
-       /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_copy_ops))
-               netfs_rreq_unmark_after_write(rreq, was_async);
-
-       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
-}
-
-/*
- * [DEPRECATED] Perform any outstanding writes to the cache.  We inherit a ref
- * from the caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
-{
-       struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct netfs_io_subrequest *subreq, *next, *p;
-       struct iov_iter iter;
-       int ret;
-
-       trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
-
-       /* We don't want terminating writes trying to wake us up whilst we're
-        * still going through the list.
-        */
-       atomic_inc(&rreq->nr_copy_ops);
-
-       list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-               if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
-                       list_del_init(&subreq->rreq_link);
-                       netfs_put_subrequest(subreq, false,
-                                            netfs_sreq_trace_put_no_copy);
-               }
-       }
-
-       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-               /* Amalgamate adjacent writes */
-               while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-                       next = list_next_entry(subreq, rreq_link);
-                       if (next->start != subreq->start + subreq->len)
-                               break;
-                       subreq->len += next->len;
-                       list_del_init(&next->rreq_link);
-                       netfs_put_subrequest(next, false,
-                                            netfs_sreq_trace_put_merged);
-               }
-
-               ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-                                              subreq->len, rreq->i_size, true);
-               if (ret < 0) {
-                       trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
-                       trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
-                       continue;
-               }
-
-               iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
-                               subreq->start, subreq->len);
-
-               atomic_inc(&rreq->nr_copy_ops);
-               netfs_stat(&netfs_n_rh_write);
-               netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
-               trace_netfs_sreq(subreq, netfs_sreq_trace_write);
-               cres->ops->write(cres, subreq->start, &iter,
-                                netfs_rreq_copy_terminated, subreq);
-       }
-
-       /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
-       if (atomic_dec_and_test(&rreq->nr_copy_ops))
-               netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
-{
-       struct netfs_io_request *rreq =
-               container_of(work, struct netfs_io_request, work);
-
-       netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
-{
-       rreq->work.func = netfs_rreq_write_to_cache_work;
-       if (!queue_work(system_unbound_wq, &rreq->work))
-               BUG();
-}
-
 /*
  * Handle a short read.
  */
        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
-       if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
-           test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq);
-
        netfs_rreq_completed(rreq, was_async);
 }