2. binder_servermanager
The binder object types are:
BINDER_TYPE_BINDER
BINDER_TYPE_HANDLE
BINDER_TYPE_FD
binder and cookie hold the address of the server object behind the current binder node.
handle holds the handle (ref desc) of a remote binder server, or the file descriptor written by writeFileDescriptor.
- writeXXX (plain data)
- writeBlob
- writeFileDescriptor
- writeStrongBinder
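All of these end up in the driver as flat_binder_object entries in the parcel. A simplified sketch of the UAPI struct (field names from the binder UAPI header of that era; exact layout varies by kernel version):
struct flat_binder_object {
    __u32 type;                   /* BINDER_TYPE_BINDER / _HANDLE / _FD and weak variants */
    __u32 flags;
    union {
        binder_uintptr_t binder;  /* local object: userspace address of the server object */
        __u32 handle;             /* remote object: ref desc; for BINDER_TYPE_FD, the fd */
    };
    binder_uintptr_t cookie;      /* extra data kept with the local object */
};
When addService writes a service binder with writeStrongBinder, the object enters the driver in this form; the driver-side handling follows.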
//tr->target.handle == 0 means the binder server targeted by this request is servicemanager
struct binder_transaction *t;
target_node = binder_context_mgr_node;
target_proc = target_node->proc;
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
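Handle 0 is reserved for the context manager: servicemanager claims it at startup with BINDER_SET_CONTEXT_MGR, roughly like this (simplified from servicemanager's binder.c):
int binder_become_context_manager(struct binder_state *bs)
{
    /* tell the driver this process owns binder_context_mgr_node (handle 0) */
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}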
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
size_t *offp, *off_end;
size_t off_min;
//The transaction buffer is allocated from the region servicemanager mmap'ed, so a single copy is
//enough to hand the arguments to servicemanager -- one of the main advantages of binder IPC.
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
//Point offp just past the (aligned) data area of the binder buffer; offp marks the start of the offsets (object) array.
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
//Copy the addService arguments into the binder buffer: both the parcel data and the object offsets.
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
}
//Walk the object entries recorded in the parcel.
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
//Fetch the flat_binder_object produced by writeStrongBinder in addService.
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
//For addService, fp->type is BINDER_TYPE_BINDER
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
//1. Look up the binder_node for this object, creating it and inserting it into the calling process's nodes rb-tree if needed.
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
//2. Create a binder_ref for the node and insert it into the target (servicemanager) process's refs_by_node and refs_by_desc rb-trees.
ref = binder_get_ref_for_node(target_proc, node);
//3. Rewrite the flat_binder_object in this transaction: the type becomes BINDER_TYPE_HANDLE and
//the handle becomes the desc of the binder_ref created in the target process.
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;
}
}
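binder_get_node used above is a plain rb-tree lookup keyed by the object's userspace address (fp->binder); a simplified sketch of the pre-refactor driver code:
static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
    struct rb_node *n = proc->nodes.rb_node;
    struct binder_node *node;

    /* proc->nodes is keyed by the local object's userspace address */
    while (n) {
        node = rb_entry(n, struct binder_node, rb_node);
        if (ptr < node->ptr)
            n = n->rb_left;
        else if (ptr > node->ptr)
            n = n->rb_right;
        else
            return node;
    }
    return NULL;
}
binder_get_ref_for_node, shown next, does the corresponding work on the target process's ref trees: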
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct binder_node *node)
{
//1. First search the target proc's refs_by_node rb-tree: if a binder_ref for this binder_node
//already exists, return it directly; otherwise create one.
struct rb_node *n;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
if (node < ref->node)
p = &(*p)->rb_left;
else if (node > ref->node)
p = &(*p)->rb_right;
else
return ref;
}
//Create a binder_ref and link it into the target proc's refs_by_node rb-tree.
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
new_ref->debug_id = ++binder_last_id;
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
//Assign the ref's desc: walk refs_by_desc in ascending order and take the lowest free value.
new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
break;
new_ref->desc = ref->desc + 1;
}
//Find the insertion point for new_ref in proc->refs_by_desc (the chosen desc must not already be present).
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
if (new_ref->desc < ref->desc)
p = &(*p)->rb_left;
else if (new_ref->desc > ref->desc)
p = &(*p)->rb_right;
else
BUG();
}
//Link new_ref into the proc->refs_by_desc rb-tree.
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
return new_ref;
}
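The desc loop hands out the lowest free descriptor at or above the starting value (0 is reserved for the context manager node). For example, if the target proc already holds descs 0, 1 and 3, a new ref for an ordinary node gets desc 2. A tiny standalone simulation of that loop (hypothetical values, not driver code):
#include <stdio.h>

int main(void)
{
    unsigned existing[] = { 0, 1, 3 };   /* descs already in refs_by_desc, ascending */
    unsigned desc = 1;                   /* start at 1 for a non-context-manager node */

    for (int i = 0; i < 3; i++) {
        if (existing[i] > desc)
            break;                       /* found a gap */
        desc = existing[i] + 1;
    }
    printf("new desc = %u\n", desc);     /* prints 2 */
    return 0;
}
With the node and ref in place, the transaction is queued on the target's todo list, and servicemanager's binder thread picks it up in binder_thread_read: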
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
signed long *consumed, int non_block)
{
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
//Copy the binder_transaction into a binder_transaction_data for userspace.
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data +
proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
}
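proc->user_buffer_offset works because binder_mmap maps the same physical pages into both the kernel and the receiving process, so the two views differ by a constant. A simplified sketch of how that offset is set up and used in this era of the driver:
/* binder_mmap(): one mapping, two addresses (simplified) */
proc->buffer = area->addr;                                  /* kernel-side address of the mapping */
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

/* binder_thread_read(): hand userspace its view of the same pages, no extra copy */
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
servicemanager's main loop reads these commands out of the driver and feeds them to binder_parse: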
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
switch(cmd) {
case BR_TRANSACTION: {
//Wrap the payload of the binder_transaction_data in a binder_io for parsing.
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
}
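bio_init_from_txn does no copying: it just points the binder_io cursors at the data and offsets buffers the driver exposed through binder_transaction_data. Roughly, from servicemanager's binder.c:
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;   /* buffer is owned by the driver, freed later via BC_FREE_BUFFER */
}
func here is svcmgr_handler, which handles addService as SVC_MGR_ADD_SERVICE: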
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t allow_isolated;
switch(txn->code) {
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
//Get the service name from the parcel and the handle from the flat_binder_object
//(the desc of the binder_ref created for this service inside servicemanager),
//then store them as a svcinfo entry on the svclist linked list.
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
}
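do_add_service records the name/handle pair on a simple singly linked list; a simplified sketch of the bookkeeping structures in service_manager.c:
struct svcinfo {
    struct svcinfo *next;
    uint32_t handle;             /* desc of the binder_ref created inside servicemanager */
    struct binder_death death;   /* used to clean up if the service process dies */
    int allow_isolated;
    size_t len;
    uint16_t name[0];            /* UTF-16 service name */
};

struct svcinfo *svclist;         /* head of the list searched by do_find_service() */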
checkService call flow:
BpBinder::transact
IPCThreadState::self()->transact
IPCThreadState::waitForResponse
IPCThreadState::talkWithDriver
ioctl
binder_thread_write
//binder_transaction wakes up servicemanager's binder thread to handle this request
binder_transaction
//the requesting thread waits on its binder_thread wait queue
binder_thread_read
//servicemanager's binder thread returns from binder_thread_read with BR_TRANSACTION and dispatches to svcmgr_handler
binder_thread_read
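talkWithDriver batches the outgoing BC_TRANSACTION and the space for incoming BR_* commands into one binder_write_read and issues a single BINDER_WRITE_READ ioctl. A hedged sketch at the UAPI level (the buffer names here are placeholders, not the real IPCThreadState members):
struct binder_write_read bwr;

bwr.write_size     = out_len;                 /* BC_TRANSACTION + binder_transaction_data */
bwr.write_consumed = 0;
bwr.write_buffer   = (binder_uintptr_t)out_buf;
bwr.read_size      = in_cap;                  /* room for BR_NOOP / BR_TRANSACTION_COMPLETE / BR_REPLY */
bwr.read_consumed  = 0;
bwr.read_buffer    = (binder_uintptr_t)in_buf;

ioctl(fd, BINDER_WRITE_READ, &bwr);           /* write side runs binder_thread_write, read side blocks in binder_thread_read */
Once servicemanager's binder thread returns from its read with BR_TRANSACTION, binder_parse dispatches the request to svcmgr_handler: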
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
}
}
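bio_put_ref is what turns the stored handle back into an object the driver will translate: it appends a flat_binder_object of type BINDER_TYPE_HANDLE to the reply. Roughly, from servicemanager's binder.c:
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);            /* also records this object's offset entry */
    else
        obj = bio_alloc(bio, sizeof(*obj));
    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;
    obj->handle = handle;
    obj->cookie = 0;
}
binder_send_reply then pushes this reply back into the driver (together with a BC_FREE_BUFFER for the request buffer, omitted from the excerpt below):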
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
data.cmd_reply = BC_REPLY;
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
binder_write(bs, &data, sizeof(data));
}
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
//Use the desc saved by servicemanager's userspace (the binder_ref desc) to look up the corresponding
//binder_ref in proc->refs_by_desc, which in turn gives the corresponding binder_node.
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
struct binder_ref *new_ref;
//Create a binder_ref for the found binder_node in the requesting process and insert it into that process's rb-trees.
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
}
//Store the desc of the newly created binder_ref in the flat_binder_object handed back to userspace,
//where it is used to create the IBinder proxy for subsequent binder calls.
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
}
}
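binder_get_ref used here is the mirror image of binder_get_node: an rb-tree walk over refs_by_desc keyed by the handle value. A simplified sketch from the same era of the driver:
static struct binder_ref *binder_get_ref(struct binder_proc *proc, uint32_t desc)
{
    struct rb_node *n = proc->refs_by_desc.rb_node;
    struct binder_ref *ref;

    while (n) {
        ref = rb_entry(n, struct binder_ref, rb_node_desc);
        if (desc < ref->desc)
            n = n->rb_left;
        else if (desc > ref->desc)
            n = n->rb_right;
        else
            return ref;
    }
    return NULL;
}
Back in userspace, readStrongBinder turns this handle into a BpBinder proxy, which is what the requesting client finally calls through.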