#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include "optee_private.h"
-int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
-			       size_t size, size_t align,
-			       int (*shm_register)(struct tee_context *ctx,
-						   struct tee_shm *shm,
-						   struct page **pages,
-						   size_t num_pages,
-						   unsigned long start))
-{
-	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
-	struct page **pages;
-	unsigned int i;
-	int rc = 0;
-
-	/*
-	 * Ignore alignment since this is already going to be page aligned
-	 * and there's no need for any larger alignment.
-	 */
-	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
-				       GFP_KERNEL | __GFP_ZERO);
-	if (!shm->kaddr)
-		return -ENOMEM;
-
-	shm->paddr = virt_to_phys(shm->kaddr);
-	shm->size = nr_pages * PAGE_SIZE;
-
-	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
-	if (!pages) {
-		rc = -ENOMEM;
-		goto err;
-	}
-
-	for (i = 0; i < nr_pages; i++)
-		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
-
-	shm->pages = pages;
-	shm->num_pages = nr_pages;
-
-	if (shm_register) {
-		rc = shm_register(shm->ctx, shm, pages, nr_pages,
-				  (unsigned long)shm->kaddr);
-		if (rc)
-			goto err;
-	}
-
-	return 0;
-err:
-	free_pages_exact(shm->kaddr, shm->size);
-	shm->kaddr = NULL;
-	return rc;
-}
-
-void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
-			       int (*shm_unregister)(struct tee_context *ctx,
-						     struct tee_shm *shm))
-{
-	if (shm_unregister)
-		shm_unregister(shm->ctx, shm);
-	free_pages_exact(shm->kaddr, shm->size);
-	shm->kaddr = NULL;
-	kfree(shm->pages);
-	shm->pages = NULL;
-}
-
static void optee_bus_scan(struct work_struct *work)
{
	WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
			     struct tee_shm *shm, size_t size, size_t align)
{
-	return optee_pool_op_alloc_helper(pool, shm, size, align,
-					  optee_ffa_shm_register);
+	return tee_dyn_shm_alloc_helper(shm, size, align,
+					optee_ffa_shm_register);
}

static void pool_ffa_op_free(struct tee_shm_pool *pool,
			     struct tee_shm *shm)
{
-	optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
+	tee_dyn_shm_free_helper(shm, optee_ffa_shm_unregister);
}
static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
int optee_enumerate_devices(u32 func);
void optee_unregister_devices(void);
-int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
-			       size_t size, size_t align,
-			       int (*shm_register)(struct tee_context *ctx,
-						   struct tee_shm *shm,
-						   struct page **pages,
-						   size_t num_pages,
-						   unsigned long start));
-void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
-			       int (*shm_unregister)(struct tee_context *ctx,
-						     struct tee_shm *shm));
-
-
void optee_remove_common(struct optee *optee);
int optee_open(struct tee_context *ctx, bool cap_memref_null);
void optee_release(struct tee_context *ctx);
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
-		return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
+		return tee_dyn_shm_alloc_helper(shm, size, align, NULL);

-	return optee_pool_op_alloc_helper(pool, shm, size, align,
-					  optee_shm_register);
+	return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
-		optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+		tee_dyn_shm_free_helper(shm, optee_shm_unregister);
	else
-		optee_pool_op_free_helper(pool, shm, NULL);
+		tee_dyn_shm_free_helper(shm, NULL);
}
static void pool_op_destroy_pool(struct tee_shm_pool *pool)
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+			     int (*shm_register)(struct tee_context *ctx,
+						 struct tee_shm *shm,
+						 struct page **pages,
+						 size_t num_pages,
+						 unsigned long start))
+{
+	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+	struct page **pages;
+	unsigned int i;
+	int rc = 0;
+
+	/*
+	 * Ignore alignment since this is already going to be page aligned
+	 * and there's no need for any larger alignment.
+	 */
+	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
+				       GFP_KERNEL | __GFP_ZERO);
+	if (!shm->kaddr)
+		return -ENOMEM;
+
+	shm->paddr = virt_to_phys(shm->kaddr);
+	shm->size = nr_pages * PAGE_SIZE;
+
+	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < nr_pages; i++)
+		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
+
+	shm->pages = pages;
+	shm->num_pages = nr_pages;
+
+	if (shm_register) {
+		rc = shm_register(shm->ctx, shm, pages, nr_pages,
+				  (unsigned long)shm->kaddr);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	free_pages_exact(shm->kaddr, shm->size);
+	shm->kaddr = NULL;
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);
+
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+			     int (*shm_unregister)(struct tee_context *ctx,
+						   struct tee_shm *shm))
+{
+	if (shm_unregister)
+		shm_unregister(shm->ctx, shm);
+	free_pages_exact(shm->kaddr, shm->size);
+	shm->kaddr = NULL;
+	kfree(shm->pages);
+	shm->pages = NULL;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
+
static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
		    int id)
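With tee_dyn_shm_alloc_helper() and tee_dyn_shm_free_helper() exported above, other TEE backend drivers can reuse them for a dynamically allocated shared-memory pool instead of carrying private copies. Below is a minimal sketch, not part of the patch: all "my_*" names are hypothetical, the register/unregister callbacks are stubs where a real driver would hand the buffer to its secure world (compare optee_shm_register()/optee_ffa_shm_register() in the hunks above), and the tee header include is assumed to be where the new declarations land.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/tee_core.h>	/* assumed location of the declarations added above */

/*
 * Hypothetical backend callbacks with the signatures the helpers expect;
 * a real driver would register/unregister the pages with its secure world.
 */
static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			   struct page **pages, size_t num_pages,
			   unsigned long start)
{
	return 0;
}

static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}

static int my_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			    size_t size, size_t align)
{
	return tee_dyn_shm_alloc_helper(shm, size, align, my_shm_register);
}

static void my_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	tee_dyn_shm_free_helper(shm, my_shm_unregister);
}

static void my_pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops my_pool_ops = {
	.alloc = my_pool_op_alloc,
	.free = my_pool_op_free,
	.destroy_pool = my_pool_op_destroy_pool,
};

/* Pool constructor following the same pattern as the OP-TEE backends. */
static struct tee_shm_pool *my_shm_pool_alloc(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);
	pool->ops = &my_pool_ops;
	return pool;
}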
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+			     int (*shm_register)(struct tee_context *ctx,
+						 struct tee_shm *shm,
+						 struct page **pages,
+						 size_t num_pages,
+						 unsigned long start));
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+			     int (*shm_unregister)(struct tee_context *ctx,
+						   struct tee_shm *shm));
+
/**
 * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
 * @shm: Shared memory handle