Please credit the source when reposting:
http://blog.csdn.net/yujun411522/article/details/46334123
This article is from [yujun411522's blog].
In Linux, processes are isolated from one another; to communicate they must go through an Inter-Process Communication (IPC) mechanism. Linux's traditional IPC mechanisms include signals, pipes, message queues, semaphores, and shared memory, but in Android these are considered either inefficient or awkward to wrap, so they are not used on a large scale; instead the Binder mechanism is used heavily. Binder is Android's extension to Linux: a character device through which different processes communicate indirectly. It has a Native layer and a Java layer; we look at the Native layer first.
Android provides all kinds of Services. The process hosting a Service is called the Server process, and a process using it is a Client process: a typical client/server (C/S) architecture, with two extra roles layered on top:
Client: the side that uses a Service
Server: the side that provides a Service
Proxy: lives on the Client side; it exposes the service interface and hides the details of Client/Server communication
Stub: lives on the Server side and plays the same role as the Proxy, hiding the communication details from the Server; it acts as the Service's local representative
5.1 The ServiceManager Startup Process
Android uses a great many services, so to keep them manageable it adds a component, ServiceManager, which handles Service registration and lookup. A Service registers its information with ServiceManager when it starts, and ServiceManager maintains a list of registered Services, so a Client only has to give ServiceManager a Service's name to obtain it. ServiceManager's startup is already configured in the init.rc file:
service servicemanager /system/bin/servicemanager
class core # belongs to the core class
user system # user and group are system
group system
critical # critical service: if it restarts too often within a window, the system reboots
onrestart restart zygote # if servicemanager restarts, zygote and media are restarted too
onrestart restart media
ServiceManager's program is frameworks/base/cmds/servicemanager/service_manager.c:
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;
    //1. open the binder device and prepare it for IPC
    bs = binder_open(128*1024);
    //2. register as the context manager
    if (binder_become_context_manager(bs)) {
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    svcmgr_handle = svcmgr;
    //enter an endless loop waiting for IPC data
    binder_loop(bs, svcmgr_handler);
    return 0;
}
ServiceManager's startup consists of three main steps:
1. Call binder_open to open the binder device and map shared memory
2. Call binder_become_context_manager to register as the context manager
3. Call binder_loop to wait for and handle IPC traffic indefinitely
5.1.1 binder_open: open the binder device and map shared memory
The binder_open function initializes binder communication; it is in frameworks/base/cmds/servicemanager/binder.c:
struct binder_state *binder_open(unsigned mapsize)
{
    //mapsize = 128*1024, i.e. 128K
    //allocate the binder_state structure
    struct binder_state *bs;
    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }
    //open the binder device for reading and writing
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }
    //map the device into this process's virtual address space
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }
    /* TODO: check version */
    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
It uses the binder_state structure:
struct binder_state
{
    int fd;            //file descriptor for /dev/binder
    void *mapped;      //start address of the mapped region
    unsigned mapsize;  //size of the mapped region
};
The function above does three things: (1) creates and allocates a binder_state; (2) opens the binder device file for reading and writing; (3) maps the device file into the process's virtual address space.
Because kernel space is shared across processes, the kernel can set aside a buffer there to hold data passed between processes, which in effect gives them shared memory. Here that is arranged with open and mmap: open opens the binder device, and mmap maps it into the process's virtual address space while telling the driver to create a 128K buffer in kernel space for IPC data. A region of the process's address space is thereby backed by a region of kernel memory, and servicemanager uses that kernel buffer to share data.
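One consequence of this mapping is that servicemanager reads incoming transaction data in place: payloads the driver delivers point into the region mapped by binder_open, so no extra user-space copy is needed. A minimal, hypothetical helper expressing that invariant (the function is mine for illustration, not part of the source):
/* Hypothetical check: does a payload pointer delivered by the driver
 * lie inside the region that binder_open mapped? */
static int in_mapped_region(struct binder_state *bs, const void *p)
{
    const char *base = (const char *) bs->mapped;
    const char *ptr = (const char *) p;
    return ptr >= base && ptr < base + bs->mapsize;
}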
5.1.2 binder_become_context_manager: register as the context manager
After opening the binder device and mapping it into memory, servicemanager registers itself as the context manager:
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
The ioctl call lands in the driver's binder_ioctl function, in kernel/drivers/staging/android/binder.c:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    //arguments here: bs->fd, BINDER_SET_CONTEXT_MGR, 0
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;
    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }
    //here cmd == BINDER_SET_CONTEXT_MGR
    switch (cmd) {
    ..... //cases BINDER_WRITE_READ, BINDER_SET_MAX_THREADS, BINDER_THREAD_EXIT elided
    case BINDER_SET_CONTEXT_MGR:
        //there can be only one context manager
        if (binder_context_mgr_node != NULL) {
            ret = -EBUSY;
            goto err;
        }
        //if a context-manager uid is already recorded, it must be ours
        if (binder_context_mgr_uid != -1) {
            if (binder_context_mgr_uid != current->cred->euid) {
                ret = -EPERM;
                goto err;
            }
        } else
            //not recorded yet: remember the current euid
            binder_context_mgr_uid = current->cred->euid;
        //create the context-manager node
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        //pin the node with reference counts
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
        break;
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
    ......
}
As this shows, binder_ioctl dispatches on the command code and handles several kinds of requests.
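For reference, the commands it dispatches on are the binder ioctls declared in the driver's binder.h. The list below is quoted from memory of headers of that era and may not match every kernel version exactly:
#define BINDER_WRITE_READ        _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT  _IOW('b', 3, int64_t)
#define BINDER_SET_MAX_THREADS   _IOW('b', 5, size_t)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
#define BINDER_SET_CONTEXT_MGR   _IOW('b', 7, int)
#define BINDER_THREAD_EXIT       _IOW('b', 8, int)
#define BINDER_VERSION           _IOWR('b', 9, struct binder_version)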
5.1.3 binder_loop: wait for and handle IPC traffic indefinitely
servicemanager has to handle both registration requests from Service components and lookup requests from Client components, so it loops forever accepting IPC requests:
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    //call binder_write to send BC_ENTER_LOOPER
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) { //loop forever
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;
        //call ioctl again, this time to read IPC data
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        ...
        //call binder_parse to parse what was read
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        ....
    }
}
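Both directions of this exchange are described by struct binder_write_read, defined in the driver's binder.h (field types quoted from memory and may differ slightly between kernel versions). The driver consumes commands from the write buffer when write_size > 0 and fills the read buffer when read_size > 0:
struct binder_write_read {
    signed long write_size;      /* bytes of commands available to consume */
    signed long write_consumed;  /* bytes actually consumed by the driver */
    unsigned long write_buffer;  /* address of the command buffer */
    signed long read_size;       /* bytes available for returned work */
    signed long read_consumed;   /* bytes actually filled in */
    unsigned long read_buffer;   /* address of the return buffer */
};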
Two things matter here: the binder_write call and the infinite loop. First, binder_write.
1. The binder_write function
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0; //we are writing data, so read_size == 0
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    //call ioctl with the BINDER_WRITE_READ command
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    ..
    return res;
}
This again goes through ioctl into binder_ioctl; we look only at the code handling BINDER_WRITE_READ:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ......
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        //write_size was just set to a positive value, so this branch runs
        if (bwr.write_size > 0) {
            //which calls binder_thread_write
            ret = binder_thread_write(proc, thread,
                    (void __user *)bwr.write_buffer,
                    bwr.write_size, &bwr.write_consumed);
            ..
        }
        //read_size was set to 0, so this branch is skipped
        if (bwr.read_size > 0) {
            ...
        }
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
}
binder_thread_write then walks the commands in the write buffer:
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        ....
        case BC_ENTER_LOOPER:
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                //a registered thread must not enter the loop as well: flag the error
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
            }
            //mark this thread as having entered the loop
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
        ....
        }
    }
    return 0;
}
So BC_ENTER_LOOPER marks the thread's looper state as BINDER_LOOPER_STATE_ENTERED. Control then returns to binder_loop, which enters the for (;;) loop shown earlier.
2. The infinite loop
Inside the loop, read_size > 0 while write_size == 0. Each iteration issues another BINDER_WRITE_READ command, so binder_ioctl runs again and goes straight to the BINDER_WRITE_READ branch:
case BINDER_WRITE_READ: {
    struct binder_write_read bwr;
    ..
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto err;
    }
    ...
    if (bwr.write_size > 0) { //write_size == 0, so this branch is skipped
        ....
    }
    if (bwr.read_size > 0) {
        //calls binder_thread_read
        ret = binder_thread_read(proc, thread,
                (void __user *)bwr.read_buffer,
                bwr.read_size, &bwr.read_consumed,
                filp->f_flags & O_NONBLOCK);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto err;
        }
    }
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto err;
    }
    break;
}
So binder_thread_read runs and reads the incoming IPC data. Once the data has been read, binder_parse parses it:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uint32_t *ptr, uint32_t size, binder_handler func)
{
int r = 1;
uint32_t *end = ptr + (size / 4);
while (ptr < end) {
uint32_t cmd = *ptr++;
switch(cmd) {
.....
case BR_TRANSACTION: {
struct binder_txn *txn = (void *) ptr;
if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
LOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) { //func is the handler passed in; here it is svcmgr_handler
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
//initialize the msg and reply buffers
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
//invoke the handler
res = func(bs, txn, &msg, &reply);
//send the result back through binder
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
.....
}
}
return r;
}
After initializing the msg and reply objects, it invokes func, i.e. svcmgr_handler, to do the real work:
int svcmgr_handler(struct binder_state *bs,
struct binder_txn *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
unsigned len;
void *ptr;
uint32_t strict_policy;
// LOGI("target=%p code=%d pid=%d uid=%d\n",
// txn->target, txn->code, txn->sender_pid, txn->sender_euid);
if (txn->target != svcmgr_handle)
return -1;
// Equivalent to Parcel::enforceInterface(), reading the RPC
// header with the strict mode policy mask and the interface name.
// Note that we ignore the strict_policy and don't propagate it
// further (since we do no outbound RPCs anyway).
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s));
return -1;
}
switch(txn->code) { //the command code written by the add or lookup request
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
//look up the service
ptr = do_find_service(bs, s, len);
if (!ptr)
break;
bio_put_ref(reply, ptr);
return 0;
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
//register the service
if (do_add_service(bs, s, len, ptr, txn->sender_euid))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
unsigned n = bio_get_uint32(msg);
si = svclist;
//walk the service list
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
LOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
When a client calls getService or addService, the request arrives here as SVC_MGR_CHECK_SERVICE or SVC_MGR_ADD_SERVICE respectively, handled by do_find_service and do_add_service.
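For reference, these transaction codes are declared in servicemanager's own binder.h (values quoted from memory of that source):
enum {
    SVC_MGR_GET_SERVICE = 1,
    SVC_MGR_CHECK_SERVICE,
    SVC_MGR_ADD_SERVICE,
    SVC_MGR_LIST_SERVICES,
};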
1 The do_add_service function
int do_add_service(struct binder_state *bs,
uint16_t *s, unsigned len,
void *ptr, unsigned uid)
{
struct svcinfo *si;
..
//permission check: is this uid allowed to register services?
if (!svc_can_register(uid, s)) {
LOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
str8(s), ptr, uid);
return -1;
}
si = find_svc(s, len);
if (si) {//already registered
if (si->ptr) {
svcinfo_death(bs, si);
}
si->ptr = ptr;
} else {
//allocate memory for the newly registered service
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {//out of memory
LOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
str8(s), ptr, uid);
return -1;
}
si->ptr = ptr;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = svcinfo_death;
si->death.ptr = si;
//link the new service into svclist
si->next = svclist;
svclist = si;
}
binder_acquire(bs, ptr);
binder_link_to_death(bs, ptr, &si->death);
return 0;
}
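do_add_service stores each service as a svcinfo node. For reference, the structure (from service_manager.c, quoted from memory) looks like this; the zero-length name array is why the malloc above reserves sizeof(*si) plus room for the name:
struct svcinfo
{
    struct svcinfo *next;      //next node in svclist
    void *ptr;                 //handle referring to the service's binder object
    struct binder_death death; //death-notification record
    unsigned len;              //name length, in 16-bit units
    uint16_t name[0];          //service name stored inline
};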
The permission-check function it calls:
int svc_can_register(unsigned uid, uint16_t *name)
{
unsigned n;
if ((uid == 0) || (uid == AID_SYSTEM))
return 1;
for (n = 0; n < sizeof(allowed) / sizeof(allowed[0]); n++)
if ((uid == allowed[n].uid) && str16eq(name, allowed[n].name))
return 1;
return 0;
}
This consults the allowed table:
static struct {
unsigned uid;//uid allowed to register the service
const char *name;//name of the service that uid may register
} allowed[] = {
#ifdef LVMX
{ AID_MEDIA, "com.lifevibes.mx.ipc" },
#endif
{ AID_MEDIA, "media.audio_flinger" },
{ AID_MEDIA, "media.player" },
{ AID_MEDIA, "media.camera" },
{ AID_MEDIA, "media.audio_policy" },
{ AID_DRM, "drm.drmManager" },
{ AID_NFC, "nfc" },
{ AID_RADIO, "radio.phone" },
{ AID_RADIO, "radio.sms" },
{ AID_RADIO, "radio.phonesubinfo" },
{ AID_RADIO, "radio.simphonebook" },
/* TODO: remove after phone services are updated: */
{ AID_RADIO, "phone" },
{ AID_RADIO, "sip" },
{ AID_RADIO, "isms" },
{ AID_RADIO, "iphonesubinfo" },
{ AID_RADIO, "simphonebook" }
};
So not just any user can add a service: root (uid 0) and system may register anything, and otherwise only the uid/name pairs listed in the allowed array are accepted. With registration covered, let's see how a service is looked up.
2 The do_find_service function
void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len)
{
struct svcinfo *si;
si = find_svc(s, len);
if (si && si->ptr) {
return si->ptr;
} else {
return 0;
}
}
It calls the find_svc function:
struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
struct svcinfo *si;
for (si = svclist; si; si = si->next) {
if ((len == si->len) &&
!memcmp(s16, si->name, len * sizeof(uint16_t))) {
return si;
}
}
return 0;
}
In other words, it just walks svclist looking for the requested service.
Everything so far has been the server side: servicemanager responding to add-service and find-service requests. But who adds the services? The point to be clear about is that each Service registers itself with servicemanager, so in that exchange the Service acts as the client.
5.2 Service Startup and Registration
Take the startup of the media services as an example; their main function is:
int main(int argc, char** argv)
{
    //create the ProcessState object and store it in proc
    sp<ProcessState> proc(ProcessState::self());
    //obtain the servicemanager proxy object
    sp<IServiceManager> sm = defaultServiceManager();
    LOGI("ServiceManager: %p", sm.get());
    //register and run the four services below
    AudioFlinger::instantiate();
    MediaPlayerService::instantiate();
    CameraService::instantiate();
    AudioPolicyService::instantiate();
    //start a thread pool
    ProcessState::self()->startThreadPool();
    //join the thread pool
    IPCThreadState::self()->joinThreadPool();
}
Four steps in total: (1) create the ProcessState object; (2) obtain the servicemanager proxy; (3) register the services; (4) start the thread pool.
1. Creating the ProcessState object
This calls ProcessState::self; the ProcessState class is in frameworks/base/libs/binder/ProcessState.cpp:
sp<ProcessState> ProcessState::self()
{
    //singleton: each process has exactly one ProcessState
    if (gProcess != NULL) return gProcess;
    AutoMutex _l(gProcessMutex);
    if (gProcess == NULL) gProcess = new ProcessState;
    return gProcess;
}
If none exists yet, a ProcessState is constructed:
ProcessState::ProcessState()
    : mDriverFD(open_driver()) //call open_driver and assign the result to mDriverFD
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        //map the binder device into the media service process
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        ....
}
This works much like servicemanager's binder_open. First look at the important open_driver function, whose return value becomes mDriverFD:
static int open_driver()
{
    //open the binder device for reading and writing
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        //close fd automatically if this process ever execs another program
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers;
        //send the BINDER_VERSION command and store the version number in vers
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            LOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        //check that the protocol versions match
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            LOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
        size_t maxThreads = 15;
        //tell the binder driver that this server's thread pool may hold at most 15 threads
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            LOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        LOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}
So it opens the binder device, checks the binder protocol version, and sets the maximum number of server threads. Overall this step does the same job as ServiceManager's binder_open: open the binder device, then read and set a few parameters.
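One difference is the mapping size: servicemanager maps 128K, while ProcessState maps BINDER_VM_SIZE, defined near the top of ProcessState.cpp (quoted from memory) as roughly 1MB minus two pages:
#define BINDER_VM_SIZE ((1*1024*1024) - (4096*2))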
2. Obtaining the servicemanager proxy
Whether adding a service or looking one up, a process must first obtain servicemanager's proxy object and talk to servicemanager through it; the proxy is obtained via the defaultServiceManager function. Several layers are involved:
1. The Binder communication layer: implements the transport, with IBinder, BBinder, and BpBinder. BBinder and BpBinder are both subclasses of IBinder; BBinder runs on the service side as the service's local representative, while BpBinder is the client-side proxy.
2. The Binder service interface: declares which operations a client may invoke on the server; for servicemanager this is IServiceManager (a declaration sketch follows this list).
3. Proxy: implemented by BpInterface and BpServiceManager. BpInterface inherits BpRefBase, whose member mRemote stores the client-side BpBinder object; BpServiceManager implements the methods declared in IServiceManager.
4. Stub: implemented by BnInterface and BnServiceManager; it is not used on this path.
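As a sketch of layer 2, IServiceManager's declaration in IServiceManager.h looks roughly like this (simplified from memory; later versions add parameters such as allowIsolated, so treat it as an approximation):
class IServiceManager : public IInterface
{
public:
    DECLARE_META_INTERFACE(ServiceManager);

    //look up a service, waiting briefly if it is not yet published
    virtual sp<IBinder> getService(const String16& name) const = 0;
    //look up a service without waiting
    virtual sp<IBinder> checkService(const String16& name) const = 0;
    //publish a service under the given name
    virtual status_t addService(const String16& name,
                                const sp<IBinder>& service) = 0;
    //enumerate all published services
    virtual Vector<String16> listServices() = 0;
};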
The defaultServiceManager function returns ServiceManager's proxy object; it is in frameworks/native/libs/binder/IServiceManager.cpp:
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    {
        //gDefaultServiceManager is likewise a singleton
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            //first call getContextObject, then pass the result to interface_cast
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
        }
    }
    return gDefaultServiceManager;
}
1 getContextObject
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
return getStrongProxyForHandle(0);
}
This calls getStrongProxyForHandle, passing 0 as the handle:
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
//handle=0
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
// b=new BpBinder(0);
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
So it returns BpBinder(0). BpBinder's constructor:
//handle=0
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)
, mAlive(1)
, mObitsSent(0)
, mObituaries(NULL)
{
LOGV("Creating BpBinder %p handle %d\n", this, mHandle);
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
IPCThreadState::self()->incWeakHandle(handle);
}
The constructor calls IPCThreadState::self()->incWeakHandle(handle); first look at IPCThreadState::self():
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
}
if (gShutdown) return NULL;
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
if (pthread_key_create(&gTLS, threadDestructor) != 0) {
pthread_mutex_unlock(&gTLSMutex);
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
This relies on pthread_getspecific. Linux threads offer thread-local storage (TLS): variables the current thread can access but other threads cannot, manipulated through pthread_getspecific and pthread_setspecific, much like the get and put of a per-thread hashmap (see the pthread illustration after the constructor below). So self() returns the calling thread's unique IPCThreadState. Its constructor:
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()), //store the ProcessState in mProcess
      mMyThreadId(androidGetTid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this); //store this object under the gTLS key
    clearCaller();
    mIn.setDataCapacity(256); //mIn and mOut are both Parcels; set their capacity
    mOut.setDataCapacity(256);
}
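As a standalone illustration of the TLS pattern used here (plain pthread API, not Android code):
#include <pthread.h>
#include <stdio.h>

static pthread_key_t gKey;

static void *worker(void *arg)
{
    //each thread stores its own value under the same key
    pthread_setspecific(gKey, arg);
    //and later reads back only its own value
    printf("this thread sees %s\n", (const char *) pthread_getspecific(gKey));
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_key_create(&gKey, NULL); //analogous to allocating gTLS above
    pthread_create(&t1, NULL, worker, (void *) "value-1");
    pthread_create(&t2, NULL, worker, (void *) "value-2");
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}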
Once the IPCThreadState has been created, incWeakHandle runs:
void IPCThreadState::incWeakHandle(int32_t handle) //handle == 0
{
    //write BC_INCREFS and 0 into mOut
    mOut.writeInt32(BC_INCREFS);
    mOut.writeInt32(handle);
}
Beyond returning a BpBinder object, getContextObject does not communicate with the binder driver at all, so ProcessState::self()->getContextObject(NULL) is equivalent to new BpBinder(0).
Continuing:
2 interface_cast
interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL))
is therefore equivalent to interface_cast<IServiceManager>(new BpBinder(0)).
Since interface_cast<IServiceManager>(obj) expands to IServiceManager::asInterface(obj),
the whole expression boils down to IServiceManager::asInterface(new BpBinder(0)).
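For reference, interface_cast itself is a one-line template in IInterface.h that simply forwards to the interface's asInterface (quoted from memory):
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}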
Now look at IServiceManager's asInterface method:
android::sp<IServiceManager> IServiceManager::asInterface(
        const android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> intr;
    if (obj != NULL) {
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface( //BpBinder::queryLocalInterface returns NULL
                IServiceManager::descriptor).get());
        if (intr == NULL) { //so this branch runs: effectively new BpServiceManager(new BpBinder(0))
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}
It first tries queryLocalInterface. obj is a BpBinder, which inherits the default queryLocalInterface from IBinder, in frameworks/native/libs/binder/Binder.cpp:
sp<IInterface> IBinder::queryLocalInterface(const String16& descriptor)
{
return NULL;
}
It simply returns NULL, so a BpServiceManager is created; that class is in frameworks/native/libs/binder/IServiceManager.cpp:
BpServiceManager(const sp<IBinder>& impl)
    : BpInterface<IServiceManager>(impl) //impl = new BpBinder(0); invoke the parent BpInterface constructor
{
}
BpServiceManager's constructor invokes the constructor of its parent BpInterface, in frameworks/native/include/binder/IInterface.h:
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote) //invoke the parent BpRefBase constructor
{
}
which in turn invokes the BpRefBase constructor; that class is declared in frameworks/native/include/binder/Binder.h:
class BpRefBase : public virtual RefBase
{
public:
    inline IBinder* remote() { return mRemote; }
    inline IBinder* remote() const { return mRemote; }
private:
    BpRefBase(const BpRefBase& o);
    BpRefBase& operator=(const BpRefBase& o);
    IBinder* const mRemote;
};
The net effect is that the BpBinder is stored in BpRefBase's mRemote member, retrievable later through the remote() method.
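That mRemote is exactly what the proxy methods use. As a preview of step 3 below, BpServiceManager::addService in IServiceManager.cpp looks roughly like this (simplified from memory, so treat details as approximate): it packs its arguments into a Parcel and hands them to remote()->transact, i.e. to BpBinder(0):
virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    Parcel data, reply;
    //RPC header: strict-mode policy plus interface descriptor,
    //matched by the header check in svcmgr_handler shown earlier
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);        //the service name
    data.writeStrongBinder(service); //the service's binder object
    //hand the parcel to BpBinder(0), which drives it into the driver
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}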
3. Registering the services
4. Starting the thread pool