cma: Store a name in the cma structure
Frameworks that may want to enumerate CMA heaps (e.g. Ion) will find it useful to have an explicit name attached to each region. Store the name in each CMA structure.

Change-Id: Ia4d4fdc7b6044bffe3238e53e78e4e47206d7f91
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Git-commit: f318dd083c8128c50e48ceb8c3e812e52800fc4f
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Signed-off-by: Prakash Gupta <guptap@codeaurora.org>
Committed by: Prakash Gupta
Parent: 2f90dca7b4
Commit: 77dbb1d1c7
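For orientation, here is a minimal, hypothetical sketch of how a caller could use the updated signatures once this patch is applied. The function name, the "example" region name, and the 16 MiB size are invented for illustration and do not appear in the patch; a real caller must run while memblock is still active (e.g. from the arch reservation path), as the in-tree callers converted below do.

/*
 * Illustrative sketch only (not part of this commit): pass a region name
 * through the updated cma_declare_contiguous() signature and read it back
 * with cma_get_name().
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *example_cma;

static int __init example_cma_reserve(void)
{
	int ret;

	/* Reserve 16 MiB anywhere (base 0, no limit), tagged "example". */
	ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
				     "example", &example_cma);
	if (ret)
		return ret;

	/*
	 * Enumerating code (e.g. an Ion-style heap walker) can now report
	 * the region by name instead of a bare index.
	 */
	pr_info("CMA region '%s', %lu bytes\n",
		cma_get_name(example_cma), cma_get_size(example_cma));
	return 0;
}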
@@ -97,7 +97,8 @@ void __init kvm_cma_reserve(void)
 			(unsigned long)selected_size / SZ_1M);
 		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
 		cma_declare_contiguous(0, selected_size, 0, align_size,
-			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
+			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
+			&kvm_cma);
 	}
 }
@@ -165,7 +165,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;

-	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
+					"reserved", res_cma);
 	if (ret)
 		return ret;

@@ -257,7 +258,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 		return -EINVAL;
 	}

-	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
+	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
 	if (err) {
 		pr_err("Reserved memory: unable to setup CMA region\n");
 		return err;
@@ -18,13 +18,15 @@ struct cma;
 extern unsigned long totalcma_pages;
 extern phys_addr_t cma_get_base(const struct cma *cma);
 extern unsigned long cma_get_size(const struct cma *cma);
+extern const char *cma_get_name(const struct cma *cma);

 extern int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, struct cma **res_cma);
+			bool fixed, const char *name, struct cma **res_cma);
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
+					const char *name,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
mm/cma.c | 17
@@ -54,6 +54,11 @@ unsigned long cma_get_size(const struct cma *cma)
 	return cma->count << PAGE_SHIFT;
 }

+const char *cma_get_name(const struct cma *cma)
+{
+	return cma->name ? cma->name : "(undefined)";
+}
+
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     int align_order)
 {
@@ -173,6 +178,7 @@ core_initcall(cma_init_reserved_areas);
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 				 unsigned int order_per_bit,
+				 const char *name,
 				 struct cma **res_cma)
 {
 	struct cma *cma;
@@ -203,6 +209,13 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	 * subsystems (like slab allocator) are available.
 	 */
 	cma = &cma_areas[cma_area_count];
+	if (name) {
+		cma->name = name;
+	} else {
+		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
+		if (!cma->name)
+			return -ENOMEM;
+	}
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
 	cma->order_per_bit = order_per_bit;
@@ -234,7 +247,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, struct cma **res_cma)
+			bool fixed, const char *name, struct cma **res_cma)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -345,7 +358,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		base = addr;
 	}

-	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
 	if (ret)
 		goto err;

mm/cma.h | 1
@@ -11,6 +11,7 @@ struct cma {
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
 #endif
+	const char *name;
 };

 extern struct cma cma_areas[MAX_CMA_AREAS];

@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
 	char name[16];
 	int u32s;

-	sprintf(name, "cma-%d", idx);
+	sprintf(name, "cma-%s", cma->name);

 	tmp = debugfs_create_dir(name, cma_debugfs_root);