Summary:
Compared with the test program in the previous post, CMA continuous physical memory user-space mapping (1), this version adds the following:
1. Allocation and mapping are combined into a single IOCTL call, so several blocks can be allocated and mapped into user space in succession, which makes the interface more convenient to use;
2. The driver keeps the allocated blocks on a linked list, so blocks are easy to add and remove;
3. Memory can be freed and its mapping removed;
4. When the driver module is removed with rmmod, all remaining memory is freed.
Mapping flow:
1. User space passes the requested allocation size to the driver through an ioctl call;
2. Depending on whether the caller asked for a write buffer, the driver allocates the physical memory with dma_alloc_writecombine or dma_alloc_coherent;
3. The driver calls vm_mmap to find a free region in the user address space for the mapping;
vm_mmap is only available in kernels newer than Linux 3.7; on older kernels sys_mmap can be used instead.
For reference, the call stack of the mmap path:
[ 409.762850] [<c00184c4>] (unwind_backtrace+0x0/0xf8) from [<bf000020>] (cmamem_mmap+0x20/0xd0 [cma_mem])
[ 409.774141] [<bf000020>] (cmamem_mmap+0x20/0xd0 [cma_mem]) from [<c0095ab8>] (mmap_region+0x310/0x540)
[ 409.774771] [<c0095ab8>] (mmap_region+0x310/0x540) from [<c0095f80>] (do_mmap_pgoff+0x298/0x330)
[ 409.784230] [<c0095f80>] (do_mmap_pgoff+0x298/0x330) from [<c00886d0>] (vm_mmap_pgoff+0x64/0x94)
[ 409.792291] [<c00886d0>] (vm_mmap_pgoff+0x64/0x94) from [<c00947a8>] (sys_mmap_pgoff+0x54/0xa8)
[ 409.800962] [<c00947a8>] (sys_mmap_pgoff+0x54/0xa8) from [<c0013940>] (ret_fast_syscall+0x0/0x30)
4. vm_mmap ends up calling the driver's mmap callback, where remap_pfn_range maps the physical memory into user space (see the condensed sketch after this list);
5. The mapped user-space address, the kernel virtual address and the physical address are saved on the linked list;
6. On a delete request the list is searched, the mapping is removed, the memory is freed, and the entry is taken off the list;
7. When the driver module is unloaded, all remaining memory is freed.
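Before the full source, here is a minimal sketch of steps 2-4 under the same assumptions as the driver below (NULL device passed to the DMA API, error handling trimmed). sketch_alloc_and_map is a hypothetical helper used only for illustration, not part of the driver:

/*
 * Minimal sketch of steps 2-4: allocate CMA-backed memory, then map it into
 * the calling process with vm_mmap(); the driver's own .mmap callback
 * finishes the job with remap_pfn_range(). Hypothetical helper, error
 * handling trimmed.
 */
static long sketch_alloc_and_map(struct file *file, size_t len, int use_writebuffer)
{
    size_t size = PAGE_ALIGN(len);
    dma_addr_t phy_base;
    void *kernel_base;
    unsigned long usr_base;

    /* step 2: pick the allocator according to the write-buffer flag */
    if (use_writebuffer)
        kernel_base = dma_alloc_writecombine(NULL, size, &phy_base, GFP_KERNEL);
    else
        kernel_base = dma_alloc_coherent(NULL, size, &phy_base, GFP_KERNEL);
    if (!kernel_base)
        return -ENOMEM;

    /* step 3: let the kernel pick a free user-space region; this re-enters
     * our own file_operations.mmap callback (cmamem_mmap below) */
    usr_base = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
    if (IS_ERR_VALUE(usr_base))
        return -ENOMEM;

    /*
     * step 4 happens inside the .mmap callback, roughly:
     *   remap_pfn_range(vma, vma->vm_start, phy_base >> PAGE_SHIFT,
     *                   vma->vm_end - vma->vm_start, PAGE_SHARED);
     */
    return 0;
}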
Source code:
Driver:
cma_mem.c
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/mempolicy.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/mman.h>

#include "cma_mem.h"

#define DEVICE_NAME "cma_mem"
#define MEM_DEBUG 1

enum cma_status {
    UNKNOW_STATUS = 0,
    HAVE_ALLOCED = 1,
    HAVE_MMAPED = 2,
};

struct cmamem_dev {
    unsigned int count;
    struct miscdevice dev;
    struct mutex cmamem_lock;
};

struct cmamem_block {
    char name[10];
    char is_use_buffer;
    char is_free;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
    struct list_head memqueue_list;
};

struct current_status {
    int status;
    int id_count;
    dma_addr_t phy_base;
};

static struct current_status cmamem_status;
static struct cmamem_dev cmamem_dev;
static struct cmamem_block *cmamem_block_head;
static int mem_block_count = 0;

static void dump_mem(struct cmamem_block *memory_block)
{
    printk("%s:CMA name:%s\n", __func__, memory_block->name);
    printk("%s:CMA id:%d\n", __func__, memory_block->id);
    printk("%s:Is usebuf:%d\n", __func__, memory_block->is_use_buffer);
    printk("%s:PHY Base:0x%08lx\n", __func__, memory_block->phy_base);
    printk("%s:KER Base:0x%08x\n", __func__, (unsigned int)(memory_block->kernel_base));
    printk("%s:USR Base:0x%08lx\n", __func__, memory_block->mem_base);
}

static long cmamem_alloc(struct file *file, unsigned long arg)
{
    struct cmamem_block *memory_block;
    struct mem_block cma_info_temp;
    int size;
    int ret;

    if ((ret = copy_from_user(&cma_info_temp, (void __user *)arg, sizeof(struct mem_block)))) {
        printk(KERN_ERR "cmamem_alloc:copy_from_user error:%d\n", ret);
        return -1;
    }

    if (cma_info_temp.name[0] == '\0') {
        printk(KERN_ERR "%s, no mem name set, please set one\n", __func__);
        return -1;
    }

    if (cma_info_temp.len) {
        size = PAGE_ALIGN(cma_info_temp.len);
        cma_info_temp.len = size;
#ifdef MEM_DEBUG
        //printk(KERN_INFO "%s len:%ld, is_use_buffer:%d\n", __func__, cma_info_temp.len, cma_info_temp.is_use_buffer);
#endif
        if (cma_info_temp.is_use_buffer)
            cma_info_temp.kernel_base = dma_alloc_writecombine(NULL, size,
                    (dma_addr_t *)(&(cma_info_temp.phy_base)), GFP_KERNEL);
        else
            cma_info_temp.kernel_base = dma_alloc_coherent(NULL, size,
                    (dma_addr_t *)(&(cma_info_temp.phy_base)), GFP_KERNEL);
        if (!cma_info_temp.kernel_base) {
            printk(KERN_ERR "dma alloc fail:%d!\n", __LINE__);
            return -ENOMEM;
        }

        cma_info_temp.id = ++mem_block_count;

        /* remember the last allocation so that cmamem_mmap() knows what to map */
        cmamem_status.phy_base = cma_info_temp.phy_base;
        cmamem_status.id_count = cma_info_temp.id;
        cmamem_status.status = HAVE_ALLOCED;

        /* map the block into the calling process; this re-enters cmamem_mmap() */
        cma_info_temp.mem_base = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
        if (IS_ERR_VALUE(cma_info_temp.mem_base)) {
            printk(KERN_ERR "do_mmap fail:%d!\n", __LINE__);
            cma_info_temp.id = --mem_block_count;
            return -ENOMEM;
        }
        printk(KERN_INFO "cma_info_temp.mem_base:0x%lx\n", cma_info_temp.mem_base);
        //mem_block_count++;
    } else {
        printk(KERN_ERR "cmamem_alloc: the len is NULL\n");
        return -1;
    }

    if (copy_to_user((void __user *)arg, (void *)(&cma_info_temp), sizeof(struct mem_block)))
        return -EFAULT;

    /* set up the memory block */
    memory_block = (struct cmamem_block *)kmalloc(sizeof(struct cmamem_block), GFP_KERNEL);
    if (memory_block == NULL) {
        printk(KERN_ERR "%s error line:%d\n", __func__, __LINE__);
        mem_block_count--;
        return -1;
    }

    if (cma_info_temp.name[0] != '\0')
        memcpy(memory_block->name, cma_info_temp.name, 10);
    memory_block->id = cma_info_temp.id;
    memory_block->is_free = 0;
    memory_block->is_use_buffer = cma_info_temp.is_use_buffer;
    memory_block->mem_base = cma_info_temp.mem_base;
    memory_block->kernel_base = cma_info_temp.kernel_base;
    memory_block->phy_base = cma_info_temp.phy_base;
    memory_block->len = cma_info_temp.len;

#ifdef MEM_DEBUG
    dump_mem(memory_block);
#endif

#ifdef CMA_TEST
    {
        int i;
        for (i = 0; i < 10; i++)
            ((char *)(cma_info_temp.kernel_base))[i] = (cma_info_temp.id * i);
    }
#endif

    /* add to the memory block queue */
    list_add_tail(&memory_block->memqueue_list, &cmamem_block_head->memqueue_list);

    return 0;
}

static int cmamem_free(struct file *file, unsigned long arg)
{
    struct cmamem_block *memory_block;
    struct mem_block cma_info_temp;
    int ret;

    if ((ret = copy_from_user(&cma_info_temp, (void __user *)arg, sizeof(struct mem_block)))) {
        printk(KERN_ERR "cmamem_free:copy_from_user error:%d\n", ret);
        return -1;
    }

    printk(KERN_INFO "will delete the mem name:%s\n", cma_info_temp.name);
    list_for_each_entry(memory_block, &cmamem_block_head->memqueue_list, memqueue_list) {
        if (memory_block) {
            //if (memory_block->id == cma_info_temp.id || !strcmp(cma_info_temp.name, memory_block->name)) {
            if (!strcmp(cma_info_temp.name, memory_block->name)) {
                if (memory_block->is_free == 0) {
                    printk(KERN_INFO "delete the mem id:%d, name:%s\n",
                            cma_info_temp.id, cma_info_temp.name);
                    vm_munmap(memory_block->mem_base, memory_block->len);
                    /* free with the API that matches the allocation */
                    if (memory_block->is_use_buffer)
                        dma_free_writecombine(NULL, memory_block->len,
                                memory_block->kernel_base, memory_block->phy_base);
                    else
                        dma_free_coherent(NULL, memory_block->len,
                                memory_block->kernel_base, memory_block->phy_base);
                    memory_block->is_free = 1;
                    list_del(&memory_block->memqueue_list);
                    break;
                }
            }
        }
    }

    return 0;
}

static int cmamem_freeall(void)
{
    struct cmamem_block *memory_block;

    printk(KERN_INFO "will delete all cma mem\n");
    list_for_each_entry(memory_block, &cmamem_block_head->memqueue_list, memqueue_list) {
        if (memory_block && memory_block->id > 0) {
            if (memory_block->is_free == 0) {
                printk(KERN_INFO "delete the mem id:%d, name:%s\n",
                        memory_block->id, memory_block->name);
                /* free with the API that matches the allocation */
                if (memory_block->is_use_buffer)
                    dma_free_writecombine(NULL, memory_block->len,
                            memory_block->kernel_base, memory_block->phy_base);
                else
                    dma_free_coherent(NULL, memory_block->len,
                            memory_block->kernel_base, memory_block->phy_base);
                memory_block->is_free = 1;
            }
        }
    }

    return 0;
}

static long cmamem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    int ret = 0;

    switch (cmd) {
    case CMEM_ALLOCATE:
    {
        printk(KERN_ERR "cmamem_ioctl:CMEM_ALLOCATE\n");
        mutex_lock(&cmamem_dev.cmamem_lock);
        ret = cmamem_alloc(file, arg);
        if (ret < 0)
            goto alloc_err;
        mutex_unlock(&cmamem_dev.cmamem_lock);
        break;
    }
    case CMEM_UNMAP:
    {
        printk(KERN_ERR "cmamem_ioctl:CMEM_UNMAP\n");
        mutex_lock(&cmamem_dev.cmamem_lock);
        ret = cmamem_free(file, arg);
        if (ret < 0)
            goto free_err;
        mutex_unlock(&cmamem_dev.cmamem_lock);
        break;
    }
    default:
    {
        printk(KERN_INFO "cma mem not support command\n");
        break;
    }
    }

    return 0;

alloc_err:
    mutex_unlock(&cmamem_dev.cmamem_lock);
    printk(KERN_ERR "%s alloc error\n", __func__);
    return ret;
free_err:
    mutex_unlock(&cmamem_dev.cmamem_lock);
    printk(KERN_ERR "%s free error\n", __func__);
    return ret;
}

static int cmamem_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long start = vma->vm_start;
    unsigned long size = vma->vm_end - vma->vm_start;
    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    unsigned long page, pos;

    //if (size > MMAP_MEM_SIZE)
    //    return -EINVAL;

    if (cmamem_status.status != HAVE_ALLOCED) {
        printk(KERN_ERR "%s, you should allocate memory first\n", __func__);
        return -EINVAL;
    }

    //printk("cmamem_mmap:vma:start=0x%08x offset=0x%08x\n", (unsigned int)start, (unsigned int)offset);

    pos = (unsigned long)cmamem_status.phy_base + offset;
    page = pos >> PAGE_SHIFT;
    //printk("cmamem_status.phy_base:0x%08x\n", (unsigned int)cmamem_status.phy_base);
    if (remap_pfn_range(vma, start, page, size, PAGE_SHARED)) {
        return -EAGAIN;
    } else {
        //printk("remap_pfn_range %u success\n", (unsigned int)page);
    }

    vma->vm_flags &= ~VM_IO;
    vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);

    cmamem_status.status = HAVE_MMAPED;

    return 0;
}

static struct file_operations dev_fops = {
    .owner = THIS_MODULE,
    .unlocked_ioctl = cmamem_ioctl,
    .mmap = cmamem_mmap,
};

static int __init cmamem_init(void)
{
    printk(KERN_INFO "%s\n", __func__);

    mutex_init(&cmamem_dev.cmamem_lock);
    //INIT_LIST_HEAD(&cmamem_dev.info_list);
    cmamem_dev.count = 0;
    cmamem_dev.dev.name = DEVICE_NAME;
    cmamem_dev.dev.minor = MISC_DYNAMIC_MINOR;
    cmamem_dev.dev.fops = &dev_fops;

    /* dummy head node of the memory block list */
    cmamem_block_head = (struct cmamem_block *)kmalloc(sizeof(struct cmamem_block), GFP_KERNEL);
    if (!cmamem_block_head)
        return -ENOMEM;
    cmamem_block_head->id = -1;
    mem_block_count = 0;
    INIT_LIST_HEAD(&cmamem_block_head->memqueue_list);
/*
    cmamem_status.status = UNKNOW_STATUS;
    cmamem_status.id_count = -1;
    cmamem_status.phy_base = 0;
*/
    return misc_register(&cmamem_dev.dev);
}

static void __exit cmamem_exit(void)
{
    printk(KERN_ERR "%s\n", __func__);
    cmamem_freeall();
    misc_deregister(&cmamem_dev.dev);
}

module_init(cmamem_init);
module_exit(cmamem_exit);

MODULE_LICENSE("GPL");
cma_mem.h
#ifndef _CMA_MEM_H_
#define _CMA_MEM_H_

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_GET_PHYS        _IOW(CMEM_IOCTL_MAGIC, 1, unsigned int)
#define CMEM_MAP             _IOW(CMEM_IOCTL_MAGIC, 2, unsigned int)
#define CMEM_GET_SIZE        _IOW(CMEM_IOCTL_MAGIC, 3, unsigned int)
#define CMEM_UNMAP           _IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)
#define CMEM_ALLOCATE        _IOW(CMEM_IOCTL_MAGIC, 5, unsigned int)
#define CMEM_CONNECT         _IOW(CMEM_IOCTL_MAGIC, 6, unsigned int)
#define CMEM_GET_TOTAL_SIZE  _IOW(CMEM_IOCTL_MAGIC, 7, unsigned int)
#define CMEM_CACHE_FLUSH     _IOW(CMEM_IOCTL_MAGIC, 8, unsigned int)

struct mem_block {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

#endif
User-space test program:
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <sys/mman.h>
#include <assert.h>
#include <linux/videodev2.h>
#include <linux/fb.h>
#include <pthread.h>
#include <poll.h>
#include <semaphore.h>

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_GET_PHYS        _IOW(CMEM_IOCTL_MAGIC, 1, unsigned int)
#define CMEM_MAP             _IOW(CMEM_IOCTL_MAGIC, 2, unsigned int)
#define CMEM_GET_SIZE        _IOW(CMEM_IOCTL_MAGIC, 3, unsigned int)
#define CMEM_UNMAP           _IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)
#define CMEM_ALLOCATE        _IOW(CMEM_IOCTL_MAGIC, 5, unsigned int)
#define CMEM_CONNECT         _IOW(CMEM_IOCTL_MAGIC, 6, unsigned int)
#define CMEM_GET_TOTAL_SIZE  _IOW(CMEM_IOCTL_MAGIC, 7, unsigned int)
#define CMEM_CACHE_FLUSH     _IOW(CMEM_IOCTL_MAGIC, 8, unsigned int)

struct cmamem_info {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

struct mem_block {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

int main()
{
    int cmem_fd;
    void *cmem_base;
    unsigned int size;
    struct mem_block region;
    int i, j;
    char str[10];

    memset(&region, 0x00, sizeof(struct mem_block));

    /* open the device; non-cached memory is needed to feed the hardware engine */
    cmem_fd = open("/dev/cma_mem", O_RDWR, 0);
    printf("cmem_fd:%d\n", cmem_fd);
    j = 0;
    if (cmem_fd >= 0)
        while (j <= 2) {
            j++;
            sprintf(str, "mem%d", j);
            memset(&region, 0x00, sizeof(struct mem_block));
            region.len = 800 * 480 * 4;
            region.is_use_buffer = 1;
            memcpy(region.name, str, strlen(str));
            printf("sizeof(struct mem_block):%zu\n", sizeof(struct mem_block));
            printf("region.mem_base:0x%08lx\n", region.mem_base);
            /* allocate one block and map it into this process */
            if (ioctl(cmem_fd, CMEM_ALLOCATE, &region) < 0) {
                perror("CMEM_ALLOCATE failed\n");
                return -1;
            }
            //size = region.len;
            printf("region.len:0x%08lx offset:0x%08lx\n", region.len, region.offset);
            printf("region.mem_base:0x%08lx\n", region.mem_base);
            for (i = 0; i < 10; i++)
                printf("%d\n", ((char *)(region.mem_base))[i]);
/*
            cmem_base = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, cmem_fd, 0);    // manual mmap
            if (cmem_base == MAP_FAILED) {
                cmem_base = 0;
                close(cmem_fd);
                cmem_fd = -1;
                perror("mmap pmem error!\n");
            }
            for (i = 0; i < 10; i++)
                ((unsigned int *)cmem_base)[i] = i;
            printf("pmem_base:0x%08x\n", cmem_base);
            for (i = 0; i < 10; i++)
                printf("%d\n", ((unsigned int *)cmem_base)[i]);
*/
            printf("\n\n ********************* \n\n");
        }

    printf("free the mem\n");
    getchar();
    j = 0;
/*
    while (j <= 2) {    // free test
        j++;
        sprintf(str, "mem%d", j);
        memset(&region, 0x00, sizeof(struct mem_block));
        region.id = j;
        region.is_use_buffer = 1;
        memcpy(region.name, str, strlen(str));
        printf("user will del:%s, id = %d\n", str, region.id);
        if (ioctl(cmem_fd, CMEM_UNMAP, &region) < 0) {
            perror("CMEM_UNMAP failed\n");
            return -1;
        }
    }
    getchar();
*/
    close(cmem_fd);
    return 0;
}
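The free path in the test above is left commented out. If you want to exercise CMEM_UNMAP, the call looks like the sketch below; free_block is a hypothetical helper, and the driver matches the block by the name used at allocation time:

/* Sketch: free one named block through the CMEM_UNMAP ioctl.
 * cmem_fd is the descriptor returned by open("/dev/cma_mem", O_RDWR). */
static int free_block(int cmem_fd, const char *name)
{
    struct mem_block region;

    memset(&region, 0x00, sizeof(struct mem_block));
    strncpy(region.name, name, sizeof(region.name) - 1);

    if (ioctl(cmem_fd, CMEM_UNMAP, &region) < 0) {
        perror("CMEM_UNMAP failed");
        return -1;
    }
    return 0;
}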
CMA continuous physical memory user-space mapping (2)