CMA Contiguous Physical Memory User-Space Mapping --- (Part 2)

Summary:

This builds on the test program from the previous article, CMA Contiguous Physical Memory User-Space Mapping --- (Part 1).

New features:

1. Allocation and mapping are combined into a single IOCTL call, so each call both allocates a block and maps it into user space; multiple blocks can be allocated and mapped in succession, which makes the interface much easier to use;

2. The driver now keeps the allocated blocks in a linked list, so entries are easy to add and remove (see the list-management sketch after this list);

3. Memory release and unmapping are added;

4. When the driver module is removed with rmmod, all remaining memory is released.
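
The multi-block bookkeeping follows the kernel's standard pattern of embedding a struct list_head in each record. The fragment below is only a minimal sketch of that pattern, using placeholder names (demo_block, demo_head, demo_add, demo_free_all) rather than the driver's real structures, which appear in cma_mem.c further down. Note that when entries are deleted while walking the list, the _safe iterator is the one to use.

/* Minimal sketch of embedded-list bookkeeping; names are illustrative only. */
#include <linux/list.h>
#include <linux/slab.h>

struct demo_block {
	int id;
	struct list_head node;		/* links this block into demo_head */
};

static LIST_HEAD(demo_head);

static void demo_add(int id)
{
	struct demo_block *blk = kmalloc(sizeof(*blk), GFP_KERNEL);

	if (!blk)
		return;
	blk->id = id;
	list_add_tail(&blk->node, &demo_head);
}

static void demo_free_all(void)
{
	struct demo_block *blk, *tmp;

	/* _safe variant because entries are removed while iterating */
	list_for_each_entry_safe(blk, tmp, &demo_head, node) {
		list_del(&blk->node);
		kfree(blk);
	}
}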

Mapping flow:

1. User space passes the requested allocation size to the driver through its ioctl interface;

2. Depending on whether the caller asks for a write buffer, the driver allocates the physical memory with dma_alloc_writecombine or dma_alloc_coherent;

3. The driver uses vm_mmap to find a free region of the user address space for the mapping;

vm_mmap is only available on kernels newer than Linux 3.7; on older kernels sys_mmap can be used instead (see the condensed sketch after this list).

For reference, the mmap call stack:

[  409.762850] [<c00184c4>] (unwind_backtrace+0x0/0xf8) from [<bf000020>] (cmamem_mmap+0x20/0xd0 [cma_mem])
[  409.774141] [<bf000020>] (cmamem_mmap+0x20/0xd0 [cma_mem]) from [<c0095ab8>] (mmap_region+0x310/0x540)
[  409.774771] [<c0095ab8>] (mmap_region+0x310/0x540) from [<c0095f80>] (do_mmap_pgoff+0x298/0x330)
[  409.784230] [<c0095f80>] (do_mmap_pgoff+0x298/0x330) from [<c00886d0>] (vm_mmap_pgoff+0x64/0x94)
[  409.792291] [<c00886d0>] (vm_mmap_pgoff+0x64/0x94) from [<c00947a8>] (sys_mmap_pgoff+0x54/0xa8)
[  409.800962] [<c00947a8>] (sys_mmap_pgoff+0x54/0xa8) from [<c0013940>] (ret_fast_syscall+0x0/0x30)

4. vm_mmap ends up calling the driver's own mmap file operation;

Inside that mmap handler, remap_pfn_range maps the physical memory into user space;

5. The mapped user-space address, the kernel virtual address and the physical address are saved in the linked list;

6. On deletion, the list is searched, the mapping is torn down, the memory is freed, and the entry is removed from the list;

7. When the driver module is unloaded, all remaining memory is freed.
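
Condensed, the allocate-and-map path looks like the sketch below: the ioctl handler allocates from CMA, then calls vm_mmap(), which drops back into the driver's own .mmap hook, where remap_pfn_range() builds the user-space page tables. This is only a minimal illustration assuming a post-3.7 (but pre-4.x, so dev may be NULL) kernel; demo_alloc_and_map, demo_mmap and pending_phys are placeholder names, error paths and the list bookkeeping are stripped out, and the complete implementation is in cma_mem.c below.

/* Illustrative sketch only -- not the driver's real symbols. */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mman.h>

static dma_addr_t pending_phys;		/* physical address of the pending allocation */

static long demo_alloc_and_map(struct file *file, unsigned long len)
{
	void *kvirt;
	unsigned long uaddr;

	len = PAGE_ALIGN(len);
	kvirt = dma_alloc_coherent(NULL, len, &pending_phys, GFP_KERNEL);
	if (!kvirt)
		return -ENOMEM;

	/* vm_mmap() (kernel >= 3.7) picks a free user-space range and then
	 * calls back into this driver's own .mmap implementation below */
	uaddr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	if (IS_ERR_VALUE(uaddr)) {
		dma_free_coherent(NULL, len, kvirt, pending_phys);
		return -ENOMEM;
	}

	return 0;	/* uaddr is what gets copied back to user space */
}

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* build the user page tables for the physical pages just allocated */
	return remap_pfn_range(vma, vma->vm_start,
			       pending_phys >> PAGE_SHIFT, size, PAGE_SHARED);
}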

Source code:

Driver:

cma_mem.c

#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/mempolicy.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/mman.h>

#include "cma_mem.h"

#define DEVICE_NAME "cma_mem" 

#define MEM_DEBUG 1

enum cma_status{
	UNKNOW_STATUS = 0,
	HAVE_ALLOCED = 1,
	HAVE_MMAPED =2,
};

struct cmamem_dev {
	unsigned int count;
	struct miscdevice dev;
	struct mutex cmamem_lock;
};

struct cmamem_block {
	char name[10];
	char is_use_buffer;
	char is_free;
	int id;
	unsigned long offset;
	unsigned long len;
	unsigned long phy_base;
	unsigned long mem_base;
	void *kernel_base;
	struct list_head memqueue_list;
};

struct current_status{
		int status;
		int id_count;
		dma_addr_t phy_base;
};

static struct current_status cmamem_status;
static struct cmamem_dev cmamem_dev;
static struct cmamem_block *cmamem_block_head;
static int mem_block_count = 0;

static void dump_mem(struct cmamem_block *memory_block)
{
	printk("%s:CMA name:%s\n",__func__,  memory_block->name);
	printk("%s:CMA id:%d\n",__func__,  memory_block->id);
	printk("%s:Is usebuf:%d\n",__func__,  memory_block->is_use_buffer);
	printk("%s:PHY Base:0x%08lx\n",__func__,  memory_block->phy_base);
	printk("%s:KER Base:0x%08x\n",__func__,  (unsigned int)(memory_block->kernel_base));
	printk("%s:USR Base:0x%08lx\n",__func__,  memory_block->mem_base);
}
static long cmamem_alloc(struct file *file, unsigned long arg)
{
	struct cmamem_block *memory_block;
	struct mem_block cma_info_temp;
	int size;
	int ret;

	if ((ret = copy_from_user(&cma_info_temp, (void __user *)arg,
	sizeof(struct mem_block))))
	{
		printk(KERN_ERR"cmamem_alloc:copy_from_user error:%d\n", ret);
		return -1;
	}

	if(cma_info_temp.name[0] == '\0')
	{
		printk(KERN_ERR "%s, no set mem name, please set\n", __func__);
		return -1;
	}

	if(cma_info_temp.len){

		size = PAGE_ALIGN(cma_info_temp.len);

		cma_info_temp.len = size;
#ifdef	MEM_DEBUG
	//	printk(KERN_INFO "%s len:%ld, is_use_buffer:%d\n", __func__, cma_info_temp.len, cma_info_temp.is_use_buffer);
#endif
		if(cma_info_temp.is_use_buffer)
			cma_info_temp.kernel_base = dma_alloc_writecombine(NULL, size, (dma_addr_t *)(&(cma_info_temp.phy_base)), GFP_KERNEL);
		else
			cma_info_temp.kernel_base = dma_alloc_coherent(NULL, size, (dma_addr_t *)(&(cma_info_temp.phy_base)), GFP_KERNEL);

		if (!cma_info_temp.phy_base){
				printk(KERN_ERR "dma alloc fail:%d!\n", __LINE__);
				return -ENOMEM;
			}

		cma_info_temp.id = ++mem_block_count;

		cmamem_status.phy_base = 	cma_info_temp.phy_base;
		cmamem_status.id_count =  	cma_info_temp.id;
		cmamem_status.status = HAVE_ALLOCED;

		cma_info_temp.mem_base = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
		if(IS_ERR_VALUE(cma_info_temp.mem_base))
		{
				printk(KERN_ERR "vm_mmap fail:%d!\n", __LINE__);
				cma_info_temp.id = --mem_block_count;
				/* give the DMA memory back so it is not leaked */
				if(cma_info_temp.is_use_buffer)
					dma_free_writecombine(NULL, size, cma_info_temp.kernel_base, (dma_addr_t)cma_info_temp.phy_base);
				else
					dma_free_coherent(NULL, size, cma_info_temp.kernel_base, (dma_addr_t)cma_info_temp.phy_base);
				return -ENOMEM;
		}
		printk(KERN_INFO "cma_info_temp.mem_base:0x%lx\n", cma_info_temp.mem_base);
		//mem_block_count ++;

	}
	else{

		printk(KERN_ERR"cmamem_alloc: the len is NULL\n");
		return -1;
	}	

	if(copy_to_user((void __user *)arg, (void *)(&cma_info_temp), sizeof(struct mem_block)))
		return -EFAULT;

	/* setup the memory block */
	memory_block = (struct cmamem_block *)kmalloc(sizeof(struct cmamem_block), GFP_KERNEL);
	if(memory_block == NULL)
	{
		printk(KERN_ERR "%s error line:%d\n", __func__, __LINE__);
		mem_block_count --;
		return -1;
	}

	if(cma_info_temp.name[0] != '\0')
		memcpy(memory_block->name, cma_info_temp.name, 10);

	memory_block->id		=	cma_info_temp.id;
	memory_block->is_free	=	0;
	memory_block->is_use_buffer	=	cma_info_temp.is_use_buffer;
	memory_block->mem_base 	=	cma_info_temp.mem_base;
	memory_block->kernel_base 	=	cma_info_temp.kernel_base;
	memory_block->phy_base 	=	cma_info_temp.phy_base;
	memory_block->len		=	cma_info_temp.len;

#ifdef	MEM_DEBUG
	dump_mem(memory_block);
#endif
#ifdef CMA_TEST
	int i;
	for(i = 0; i < 10; i++)
		((char *)(cma_info_temp.kernel_base))[i] = (cma_info_temp.id * i);
#endif
	/* add to memory block queue */
	list_add_tail(&memory_block->memqueue_list, &cmamem_block_head->memqueue_list);

	return 0;
}
static int cmamem_free(struct file *file, unsigned long arg)
{
	struct cmamem_block *memory_block;
	struct mem_block cma_info_temp;
	int ret;

	if ((ret = copy_from_user(&cma_info_temp, (void __user *)arg,
	sizeof(struct mem_block))))
	{
		printk(KERN_ERR"cmamem_alloc:copy_from_user error:%d\n", ret);
		return -1;
	}
	printk(KERN_INFO "will delete the mem name:%s\n", cma_info_temp.name);

	list_for_each_entry(memory_block, &cmamem_block_head->memqueue_list, memqueue_list)
	{
		if(memory_block){
			//if(memory_block->id == cma_info_temp.id || !strcmp(cma_info_temp.name, memory_block->name)){
			if(!strcmp(cma_info_temp.name, memory_block->name)){
				if(memory_block->is_free == 0){

					printk(KERN_INFO "delete the mem id:%d, name:%s\n", cma_info_temp.id, cma_info_temp.name);

					vm_munmap(memory_block->mem_base, memory_block->len);

					/* free with the same helper that allocated the block */
					if(memory_block->is_use_buffer)
						dma_free_writecombine(NULL, memory_block->len, memory_block->kernel_base, memory_block->phy_base);
					else
						dma_free_coherent(NULL, memory_block->len, memory_block->kernel_base, memory_block->phy_base);

					memory_block->is_free = 1;

					list_del(&memory_block->memqueue_list);
					kfree(memory_block);

					break;
				}

			}
		}
	}  

	return 0;
}
static int cmamem_freeall(void)
{
	struct cmamem_block *memory_block, *memory_block_tmp;

	printk(KERN_INFO "will delete all cma mem\n");

	/* _safe iterator because entries are removed while walking the list */
	list_for_each_entry_safe(memory_block, memory_block_tmp, &cmamem_block_head->memqueue_list, memqueue_list)
	{
		if(memory_block->id > 0 && memory_block->is_free == 0){
			printk(KERN_INFO "delete the mem id:%d, name:%s\n", memory_block->id, memory_block->name);

			/* free with the same helper that allocated the block */
			if(memory_block->is_use_buffer)
				dma_free_writecombine(NULL, memory_block->len, memory_block->kernel_base, memory_block->phy_base);
			else
				dma_free_coherent(NULL, memory_block->len, memory_block->kernel_base, memory_block->phy_base);

			memory_block->is_free = 1;
		}

		list_del(&memory_block->memqueue_list);
		kfree(memory_block);
	}

	return 0;
}
static long cmamem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{

	int ret = 0;

	switch(cmd){
		case CMEM_ALLOCATE:
		{
			printk(KERN_ERR"cmamem_ioctl:CMEM_ALLOCATE\n");
			mutex_lock(&cmamem_dev.cmamem_lock);

			ret = cmamem_alloc(file, arg);
			if(ret < 0)
				goto alloc_err;

			mutex_unlock(&cmamem_dev.cmamem_lock);
			break;
		}
		case CMEM_UNMAP:
		{
			printk(KERN_ERR"cmamem_ioctl:CMEM_UNMAP\n");
			mutex_lock(&cmamem_dev.cmamem_lock);

			ret = cmamem_free(file, arg);
			if(ret < 0)
				goto free_err;

			mutex_unlock(&cmamem_dev.cmamem_lock);
			break;
		}
		default:
		{
			printk(KERN_INFO "cma mem not support command\n");
			break;
		}
	}
	return 0;
	alloc_err:
		mutex_unlock(&cmamem_dev.cmamem_lock);
		printk(KERN_ERR "%s alloc error\n", __func__);
		return ret;
	free_err:
		mutex_unlock(&cmamem_dev.cmamem_lock);
		printk(KERN_ERR "%s free error\n", __func__);

	return ret;
}

static int cmamem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	//if(size > MMAP_MEM_SIZE)
	//	return -EINVAL;
	if(cmamem_status.status != HAVE_ALLOCED)
	{
		printk(KERN_ERR"%s, you should allocted memory firstly\n", __func__);
		return -EINVAL;
	}

//	printk( "cmamem_mmap:vma:start=0x%08x offset=0x%08x\n", (unsigned int)start, (unsigned int)offset );

	pos = (unsigned long)cmamem_status.phy_base + offset;
	page = pos >> PAGE_SHIFT ;

//	printk( "cmamem_status.phy_base:0x%08x\n", (unsigned int)cmamem_status.phy_base);

	if( remap_pfn_range( vma, start, page, size, PAGE_SHARED )) {
		return -EAGAIN;
	}
	else{
	//	printk( "remap_pfn_range %u\n success\n", (unsigned int)page );
	}
	vma->vm_flags &= ~VM_IO;
	vma->vm_flags |=  (VM_DONTEXPAND | VM_DONTDUMP);

	cmamem_status.status = HAVE_MMAPED;
	return 0;
}

static struct file_operations dev_fops = {
    .owner          = THIS_MODULE,
    .unlocked_ioctl = cmamem_ioctl,
	.mmap = cmamem_mmap,
};

static int __init cmamem_init(void)
{
	printk(KERN_INFO "%s\n", __func__);
	mutex_init(&cmamem_dev.cmamem_lock);
//	INIT_LIST_HEAD(&cmamem_dev.info_list);
	cmamem_dev.count = 0;
	cmamem_dev.dev.name = DEVICE_NAME;
	cmamem_dev.dev.minor = MISC_DYNAMIC_MINOR;
	cmamem_dev.dev.fops = &dev_fops;

	cmamem_block_head = kmalloc(sizeof(struct cmamem_block), GFP_KERNEL);
	if (cmamem_block_head == NULL)
		return -ENOMEM;
	cmamem_block_head->id = -1;
	mem_block_count = 0;
	INIT_LIST_HEAD(&cmamem_block_head->memqueue_list);
/*
	cmamem_status.status = UNKNOW_STATUS;
	cmamem_status.id_count = -1;
	cmamem_status.phy_base = 0;
*/
	return misc_register(&cmamem_dev.dev);
}

static void __exit cmamem_exit(void)
{
    printk(KERN_ERR"%s\n", __func__);
	cmamem_freeall();
	misc_deregister(&cmamem_dev.dev);
} 

module_init(cmamem_init);
module_exit(cmamem_exit);
MODULE_LICENSE("GPL");

cma_mem.h

#ifndef _CMA_MEM_H_
#define _CMA_MEM_H_

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_GET_PHYS		_IOW(CMEM_IOCTL_MAGIC, 1, unsigned int)
#define CMEM_MAP		_IOW(CMEM_IOCTL_MAGIC, 2, unsigned int)
#define CMEM_GET_SIZE		_IOW(CMEM_IOCTL_MAGIC, 3, unsigned int)
#define CMEM_UNMAP		_IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)

#define CMEM_ALLOCATE		_IOW(CMEM_IOCTL_MAGIC, 5, unsigned int)

#define CMEM_CONNECT		_IOW(CMEM_IOCTL_MAGIC, 6, unsigned int)

#define CMEM_GET_TOTAL_SIZE	_IOW(CMEM_IOCTL_MAGIC, 7, unsigned int)
#define CMEM_CACHE_FLUSH	_IOW(CMEM_IOCTL_MAGIC, 8, unsigned int)

struct mem_block {
	char name[10];
	char is_use_buffer;
	int id;
	unsigned long offset;
	unsigned long len;
	unsigned long phy_base;
	unsigned long mem_base;
	void *kernel_base;
};

#endif

User-space test program:

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <sys/mman.h>
#include <assert.h>
#include <linux/videodev2.h>
#include <linux/fb.h>
#include <pthread.h>
#include <poll.h>
#include <semaphore.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_GET_PHYS		_IOW(CMEM_IOCTL_MAGIC, 1, unsigned int)
#define CMEM_MAP		_IOW(CMEM_IOCTL_MAGIC, 2, unsigned int)
#define CMEM_GET_SIZE		_IOW(CMEM_IOCTL_MAGIC, 3, unsigned int)
#define CMEM_UNMAP		_IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)

#define CMEM_ALLOCATE		_IOW(CMEM_IOCTL_MAGIC, 5, unsigned int)

#define CMEM_CONNECT		_IOW(CMEM_IOCTL_MAGIC, 6, unsigned int)

#define CMEM_GET_TOTAL_SIZE	_IOW(CMEM_IOCTL_MAGIC, 7, unsigned int)
#define CMEM_CACHE_FLUSH	_IOW(CMEM_IOCTL_MAGIC, 8, unsigned int)

struct cmamem_info {
	char name[10];
	char is_use_buffer;
	int id;
	unsigned long offset;
	unsigned long len;
	unsigned long phy_base;
	unsigned long mem_base;
	void *kernel_base;
};

struct mem_block {
	char name[10];
	char is_use_buffer;
	int id;
	unsigned long offset;
	unsigned long len;
	unsigned long phy_base;
	unsigned long mem_base;
	void *kernel_base;
};

int main()
{
	int cmem_fd;
	void *cmem_base;
	unsigned int size;
	struct mem_block region;
	int i,j;
	char str[10];

	memset(&region, 0x00, sizeof(struct mem_block));

	cmem_fd = open("/dev/cma_mem", O_RDWR, 0);//打开设备,为了操作硬件引擎,要noncache的
	printf("cmem_fd:%d\n", cmem_fd);
	j = 0;
	if (cmem_fd >= 0)
	while(j <= 2)
	{
		j++;
		sprintf(str, "mem%d", j);
		memset(&region, 0x00, sizeof(struct mem_block));
		region.len = 800 * 480 * 4;
		region.is_use_buffer = 1;
		memcpy(region.name, str, strlen(str));
		printf("sizeof(struct mem_block):%d\n", sizeof(struct mem_block));
		printf("region.mem_base:0x%08x\n", region.mem_base);
		if (ioctl(cmem_fd, CMEM_ALLOCATE, &region) < 0)	/* allocate one block and map it into user space */
		{
			perror("CMEM_ALLOCATE failed");
			return -1;
		}

		//size = region.len;
		printf("region.len:0x%08x offset:0x%08x\n",region.len, region.offset);
		printf("region.mem_base:0x%08x\n", region.mem_base);
		for(i = 0; i < 10; i++)
		printf("%d\n", ((char *)(region.mem_base))[i]);
	/*	cmem_base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, cmem_fd, 0);	// map it directly with mmap(), kept for reference

		if (cmem_base == MAP_FAILED)
		{	cmem_base = 0;
            close(cmem_fd);
            cmem_fd = -1;
			perror("mmap pmem error!\n");
		}
		for(i = 0; i < 10; i++)
		((unsigned int *)cmem_base)[i] = i;
		printf("pmem_base:0x%08x\n", cmem_base);
		for(i = 0; i < 10; i++)
		printf("%d\n", ((unsigned int *)cmem_base)[i]);
		*/
		printf("\n\n ********************* \n\n");
    }
	printf("free the mem\n");
	getchar();
	j = 0;
/*	while(j <= 2)	// free test
	{
		j++;
		sprintf(str, "mem%d", j);
		memset(&region, 0x00, sizeof(struct mem_block));
		region.id = j;
		region.is_use_buffer = 1;
		memcpy(region.name, str, strlen(str));
		printf("user will del:%s, id = %d\n", str, region.id);
		if (ioctl(cmem_fd, CMEM_UNMAP, &region) < 0)	// unmap and free the block by name
		{
			perror("CMEM_UNMAP failed");
			return -1;
		}
	}

	getchar();*/
	close(cmem_fd);
	return 0;
}
