servicemanager是binder部分的关键组件之一,binder client要想获取service提供的服务,可以根据service的名字向servicemanager发出获取service的请求。而binder service则需要在servicemanager中注册之后,才能够被别的client使用。显然要在其他binder client或者binder service访问servicemanager前,servicemanager就得启动了,同时servicemanager需要作为守护进程一直运行在系统中,否则当servicemanager挂掉后,binder client和binder service都无法正常工作了。
1. init启动servicemanager
servicemanager作为守护进程是由init进程来启动的,下面是代码中servicemanager.rc中的代码
1 2 3 4 5 6 7 8 9 10 11 12 13 |
service servicemanager /system/bin/servicemanager
    class core
    user system
    group system readproc
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart audioserver
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart inputflinger
    onrestart restart drm
    onrestart restart cameraserver
servicemanager的class为core,core服务都是系统最基本的服务,只有core服务全部启动了,手机才能运行起来。servicemanager起来后的user为system,group为system和readproc(正常来说应该只有system才对,因为用的是mtk的代码,所以不清楚mtk是否有修改这里)。onrestart表示如果servicemanager重新启动的话就重新启动healthd、zygote、media等服务。所以servicemanager服务实际运行的程序就是system/bin/servicemanager了。
2. servicemanager的Android.mk
下面是servicemanager的Android.mk文件的内容,省略了一部分无关的内容
1 2 3 4 5 6 7 |
include $(CLEAR_VARS)
LOCAL_SHARED_LIBRARIES := liblog libcutils libselinux
LOCAL_SRC_FILES := service_manager.c binder.c
LOCAL_CFLAGS += $(svc_c_flags)
LOCAL_MODULE := servicemanager
LOCAL_INIT_RC := servicemanager.rc
include $(BUILD_EXECUTABLE)
源码文件只包含两个:service_manager.c和binder.c,启动的init rc文件为servicemanager.rc
3. servicemanager源码分析
3.1 service_manager.c main函数
main函数源码如下:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
/*
 * Entry point of servicemanager (started by init as a core service).
 * Opens the binder driver, registers itself as the binder context manager,
 * configures SELinux, then services requests forever via binder_loop().
 * Returns -1 on setup failure; on success it never returns normally.
 */
int main()
{
    struct binder_state *bs;

    /* Open the binder driver and map 128KB of buffer space (see binder_open). */
    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    /* Register this process as the one global binder context manager
     * ("the housekeeper" of the binder system, handle 0). */
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    /* SELinux setup: cache enforcement state and the service-context handle
     * in file-scope globals used later by the permission checks. */
    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        /* With SELinux enabled, failing to get a handle/context is fatal. */
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }

        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    /* Route SELinux audit and log output through our callbacks. */
    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    /* Enter the main loop and wait for incoming binder events;
     * svcmgr_handler is invoked for every transaction. */
    binder_loop(bs, svcmgr_handler);

    return 0;
}
main函数的内容比较简单,首先打开binder驱动,然后将自己设置为binder系统的“大管家”,然后就进入循环等待事件到来。下面看看binder_open里面做了什么设置:
3.2 binder.c binder_open函数
binder_open函数的内容放在了binder.c文件中
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
struct binder_state { int fd; void *mapped; size_t mapsize; }; struct binder_state *binder_open(size_t mapsize) { // binder_state定义如上,记录fd,映射内存的起始地址和大小等信息 struct binder_state *bs; struct binder_version vers; bs = malloc(sizeof(*bs)); if (!bs) { errno = ENOMEM; return NULL; } // 打开binder驱动 bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC); if (bs->fd < 0) { fprintf(stderr,"binder: cannot open device (%s)\n", strerror(errno)); goto fail_open; } // 调用驱动的ioctl获取binder版本号 if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) || (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) { fprintf(stderr, "binder: kernel driver version (%d) differs from user space version (%d)\n", vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION); goto fail_open; } // 传进来的mapsize为128*1024,即128KB大小 bs->mapsize = mapsize; bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); if (bs->mapped == MAP_FAILED) { fprintf(stderr,"binder: cannot map device (%s)\n", strerror(errno)); goto fail_map; } // 将fd等信息返回给main函数 return bs; fail_map: close(bs->fd); fail_open: free(bs); return NULL; } |
总结起来就是打开驱动,然后进行内存映射,然后将文件fd和映射内存的信息返回给main函数
3.3 binder.c binder_become_context_manager函数
1 2 3 4 |
/*
 * Ask the binder driver to make the calling process the (single, global)
 * binder context manager. Returns the driver's result: 0 on success,
 * -1 with errno set on failure.
 */
int binder_become_context_manager(struct binder_state *bs)
{
    int status;

    /* BINDER_SET_CONTEXT_MGR takes no meaningful argument, hence the 0. */
    status = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    return status;
}
binder_become_context_manager直接调用ioctl系统调用,之后系统会调用到binder驱动的ioctl方法,其中command为BINDER_SET_CONTEXT_MGR。
3.4 binder.c binder_ioctl函数
这里的binder.c文件和3.3中的binder.c文件不是同一个文件,这一节的binder.c文件是kernel/drivers底下的binder.c。
1 2 3 4 5 6 7 8 9 10 |
/*
 * Kernel-side ioctl entry point of the binder driver
 * (kernel/drivers/.../binder.c). Only the BINDER_SET_CONTEXT_MGR branch
 * is shown here; the elided parts are marked with "......".
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ......
    case BINDER_SET_CONTEXT_MGR:
        /* Delegate registration of the context manager to the helper below. */
        ret = binder_ioctl_set_ctx_mgr(filp, thread);
        if (ret)
            goto err;
        break;
    ......
}
3.5 binder.c binder_ioctl_set_ctx_mgr函数
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
/*
 * Register the calling process (servicemanager) as the binder context
 * manager. Invoked from binder_ioctl() for BINDER_SET_CONTEXT_MGR.
 * Returns 0 on success or a negative errno value on failure.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp, struct binder_thread *thread)
{
    int ret = 0;
    /* Per-process binder bookkeeping, stored in the file's private data. */
    struct binder_proc *proc = filp->private_data;
    kuid_t curr_euid = current_euid();

    /* Only one context manager may ever be registered; fail if one exists. */
    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    /* SELinux policy check for the context-manager capability. */
    ret = security_binder_set_context_mgr(proc->tsk);
    if (ret < 0)
        goto out;
    /* If a manager uid was recorded earlier, the caller's euid must match;
     * otherwise remember the caller's euid as the manager uid. */
    if (uid_valid(binder_context_mgr_uid)) {
        if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns, binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        binder_context_mgr_uid = curr_euid;
    }
    /* Create the binder_node that represents servicemanager (ptr/cookie 0,
     * i.e. the well-known handle 0 every client can reach). */
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    /* Pin the node with artificial strong/weak references so it is never
     * freed while the system runs. */
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}
所以binder_become_context_manager最后回到binder驱动里面,创建binder_context_mgr_node这个binder_node,用于记录servicemanager,而且servicemanager只能注册一个,如果之前有注册,那么这里就会报错的。
3.6 binder.c binder_loop函数
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
/*
 * Main event loop: announce this thread as a binder looper, then block in
 * BINDER_WRITE_READ forever, feeding every batch of driver commands to
 * binder_parse(), which in turn calls 'func' (svcmgr_handler) for requests.
 */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];
    int ret;

    /* No outbound data on the write half; only the read half is used below. */
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    /* Tell the driver this thread enters the looper state.
     * binder_write() wraps the command into the write half of a
     * binder_write_read and pushes it down via ioctl. */
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    while (1) {
        /* (Re)arm the read buffer and block until the driver has data. */
        bwr.read_buffer = (uintptr_t) readbuf;
        bwr.read_consumed = 0;
        bwr.read_size = sizeof(readbuf);

        ret = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (ret < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        /* Decode whatever the driver handed us; func handles transactions. */
        ret = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (ret == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (ret < 0) {
            ALOGE("binder_loop: io error %d %s\n", ret, strerror(errno));
            break;
        }
    }
}
这里调用binder_parse,先对接收到的数据进行解析,解析完毕后会调用svcmgr_handler对解析后的数据进行处理
3.7 binder.c binder_parse函数
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 |
/*
 * Walk a buffer of BR_* return commands handed to us by the binder driver
 * and act on each one. 'ptr'/'size' delimit the raw command stream;
 * 'bio', when non-NULL, receives the payload of a BR_REPLY; 'func' is the
 * transaction handler (svcmgr_handler in this process).
 * Returns 1 in the normal case, 0 once a reply terminated the stream,
 * and -1 on malformed input or a failure command.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        /* Step over the command word just fetched. */
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        /* BR_NOOP and BR_TRANSACTION_COMPLETE require no action at all. */
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        /* Reference-count notifications: nothing to do beyond skipping
         * their binder_ptr_cookie payload. */
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        /* An incoming request addressed to this process. */
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            /* func is the handler passed in from binder_loop (svcmgr_handler). */
            if (func) {
                unsigned rdata[256/4];
                /* binder_io is servicemanager's internal cursor format for
                 * reading/writing binder objects in a transaction payload. */
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                /* Let the handler process the request and fill 'reply'. */
                res = func(bs, txn, &msg, &reply);
                if (txn->flags & TF_ONE_WAY) {
                    /* One-way (async) call: no answer needed, just release
                     * the kernel buffer that carried the request. */
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    /* Synchronous call: ship the result back to the sender. */
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            /* Advance past the transaction record just handled. */
            ptr += sizeof(*txn);
            break;
        }
        /* A reply to a request we issued (unused by the main loop, which
         * passes bio == 0). */
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        /* A binder we registered a death notification for has died:
         * invoke its callback with the stored cookie. */
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
上述代码的工作是读取command,并根据command处理数据。对于BR_TRANSACTION,需要做的事情比较多:要初始化reply,然后通过func(即svcmgr_handler)来处理客户端的请求,最后将处理结果通过binder驱动返回给发送请求的客户端。
3.8 service_manager.c svcmgr_handler函数
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 |
/* Record for one registered service; linked into the file-global svclist. */
struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;            /* binder handle of the service */
    struct binder_death death;  /* death-notification callback + cookie */
    int allow_isolated;         /* may isolated-uid processes access it? */
    size_t len;                 /* name length in 16-bit units */
    uint16_t name[0];           /* UTF-16 service name (flexible trailing array) */
};

/*
 * Transaction handler invoked from binder_parse() for every BR_TRANSACTION.
 * Dispatches on txn->code to get/check, add, or list services.
 * Returns 0 on success, -1 on any error; results are written into 'reply'.
 */
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    /* Reject transactions whose target is not servicemanager itself. */
    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    /* Validate the RPC header: strict-mode word followed by the interface id. */
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    /* Refresh the SELinux service-context handle if policy was reloaded. */
    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch(txn->code) {
    /* Look up or query a service by name. */
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        /* do_find_service performs the lookup: servicemanager keeps a
         * svcinfo linked list of everything registered, so this is a
         * straightforward traversal of that list. */
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    /* Register a service. */
    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        /* do_add_service performs the registration, filling a svcinfo
         * record and linking it into svclist. */
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    /* Return the name of the n-th registered service. */
    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        /* Walk the list n entries forward. */
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    /* Success status word for codes that fall through (e.g. add_service). */
    bio_put_uint32(reply, 0);
    return 0;
}
3.9 service_manager.c do_add_service函数
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
/*
 * Register (or re-register) the service named s[0..len) under binder handle
 * 'handle'. 'uid'/'spid' identify the caller for the permission check;
 * 'allow_isolated' records whether isolated-uid processes may use it.
 * Returns 0 on success, -1 on failure.
 */
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    //ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
    //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);

    /* Reject a null handle, an empty name, and over-long names. */
    if (!handle || (len == 0) || (len > 127))
        return -1;

    /* svc_can_register checks (via SELinux) whether the caller may
     * register a service under this name. */
    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }

    /* If the name is already registered, drop the old binder reference
     * and overwrite the entry with the new handle. */
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        /* Allocate svcinfo plus room for the name (incl. terminator). */
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        /* Fill in the record and push it onto the head of svclist. */
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;
        svclist = si;
    }

    /* Take a strong reference on the service and subscribe to its death
     * notification so the entry can be cleaned up if it dies. */
    binder_acquire(bs, handle);
    binder_link_to_death(bs, handle, &si->death);

    return 0;
}
servicemanager的功能架构和代码都比较简单直接,和一个C语言小程序类似,主要就是其内部会维护一个svclist链表,记录所有注册到servicemanager中的信息,方便后续查询