enum pkvm_component_id {
        PKVM_ID_HOST,
        PKVM_ID_HYP,
+       /*
+        * An entity outside the hypervisor, reached by forwarding FF-A
+        * calls to the secure side; the secure world validates its own
+        * end of the transition, so our checks are host-only.
+        */
+       PKVM_ID_FFA,
 };
 
 extern unsigned long hyp_nr_cpus;
 int __pkvm_host_unshare_hyp(u64 pfn);
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
+int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
+int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
 
 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
 
        case PKVM_ID_HYP:
                ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
                break;
+       case PKVM_ID_FFA:
+               /*
+                * We only check the host; the secure side will check the other
+                * end when we forward the FFA call.
+                */
+               ret = 0;
+               break;
        default:
                ret = -EINVAL;
        }
        case PKVM_ID_HYP:
                ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
                break;
+       case PKVM_ID_FFA:
+               /*
+                * We're not responsible for any secure page-tables, so there's
+                * nothing to do here.
+                */
+               ret = 0;
+               break;
        default:
                ret = -EINVAL;
        }
        case PKVM_ID_HYP:
                ret = hyp_ack_unshare(completer_addr, tx);
                break;
+       case PKVM_ID_FFA:
+               /* See check_share() */
+               ret = 0;
+               break;
        default:
                ret = -EINVAL;
        }
        case PKVM_ID_HYP:
                ret = hyp_complete_unshare(completer_addr, tx);
                break;
+       case PKVM_ID_FFA:
+               /* See __do_share() */
+               ret = 0;
+               break;
        default:
                ret = -EINVAL;
        }
        hyp_unlock_component();
        host_unlock_component();
 }
+
+/*
+ * Share a physically contiguous range of host pages with the secure
+ * world, ahead of forwarding an FF-A memory-share call.
+ *
+ * @pfn:      host PFN of the first page in the range.
+ * @nr_pages: number of pages to share.
+ *
+ * Only the host lock is taken: the FFA completer side is a no-op here,
+ * as the secure side performs its own checks when the call is forwarded
+ * (see the PKVM_ID_FFA cases in check_share()/__do_share()).
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
+{
+       int ret;
+       struct pkvm_mem_share share = {
+               .tx     = {
+                       .nr_pages       = nr_pages,
+                       .initiator      = {
+                               .id     = PKVM_ID_HOST,
+                               .addr   = hyp_pfn_to_phys(pfn),
+                       },
+                       .completer      = {
+                               .id     = PKVM_ID_FFA,
+                       },
+               },
+       };
+
+       host_lock_component();
+       ret = do_share(&share);
+       host_unlock_component();
+
+       return ret;
+}
+
+/*
+ * Reverse of __pkvm_host_share_ffa(): return a physically contiguous
+ * range of pages, previously shared with the secure world over FF-A,
+ * to exclusive host ownership.
+ *
+ * @pfn:      host PFN of the first page in the range.
+ * @nr_pages: number of pages to unshare.
+ *
+ * As with the share path, only the host lock is needed; the secure
+ * side handles its own page-tables when the FF-A call is forwarded.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
+{
+       int ret;
+       struct pkvm_mem_share share = {
+               .tx     = {
+                       .nr_pages       = nr_pages,
+                       .initiator      = {
+                               .id     = PKVM_ID_HOST,
+                               .addr   = hyp_pfn_to_phys(pfn),
+                       },
+                       .completer      = {
+                               .id     = PKVM_ID_FFA,
+                       },
+               },
+       };
+
+       host_lock_component();
+       ret = do_unshare(&share);
+       host_unlock_component();
+
+       return ret;
+}