|
Function calls:
* Initialization
__start_xen()
  domain_create()            // this is where dom0 is created
    evtchn_init()            // event-channel initialization
      get_free_port()
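get_free_port() itself is not reproduced in this note. As a purely illustrative sketch (not the real Xen implementation), it conceptually returns the first port whose struct evtchn is still in ECS_FREE state, and the real code grows the domain's event-channel table when none is left:

/* Illustrative sketch only, not the real get_free_port(): return the first port
 * still in ECS_FREE state; the real code allocates a fresh bucket of struct
 * evtchn when every existing port is already in use. */
static int get_free_port_sketch(struct domain *d)
{
    int port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    return -ENOSPC; /* the real code would try to grow the table here */
}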
* Operations
All event-channel operations are performed through the hypercall HYPERVISOR_event_channel_op(int cmd, void *arg).
What arg points to depends on cmd. For example:
#define EVTCHNOP_send 4
struct evtchn_send {
/* IN parameters. */
evtchn_port_t port;
};
typedef struct evtchn_send evtchn_send_t;
The per-command argument is carried in struct evtchn_op (see the structure definitions at the end of this note).
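As a minimal guest-side sketch (assuming the usual PV-guest hypercall wrapper HYPERVISOR_event_channel_op, which also appears in unmask_evtchn() further below), sending an event on an already-bound local port looks like this:

/* Sketch: notify the remote end of an already-bound local port. */
static void send_event_sketch(evtchn_port_t port)
{
    struct evtchn_send send = { .port = port };
    (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}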
All of these operations are serviced by the same routine:
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
long rc;
switch ( cmd )
{
case EVTCHNOP_alloc_unbound: {
/* Interdomain binding is a two-step handshake: EVTCHNOP_alloc_unbound followed by
 * EVTCHNOP_bind_interdomain. This step allocates a port in domain <dom> and marks
 * it as accepting interdomain bindings from domain <remote_dom>, i.e. the port is
 * opened for remote_dom to bind to later. */
struct evtchn_alloc_unbound alloc_unbound;
if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 ) // by the time this is called, the caller has already
return -EFAULT; // filled in alloc_unbound.dom and alloc_unbound.remote_dom
rc = evtchn_alloc_unbound(&alloc_unbound);
if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
case EVTCHNOP_bind_interdomain: {
/* Connect to the given remote <dom, port>; the local port of the new connection is
 * returned. <remote_dom, remote_port> must identify a port that is unbound and
 * marked as accepting bindings from the calling domain. */
struct evtchn_bind_interdomain bind_interdomain;
if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_interdomain(&bind_interdomain); // remote_dom/remote_port were filled in by the caller
if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
case EVTCHNOP_bind_virq: {
/* Bind the given VIRQ to the given VCPU. */
struct evtchn_bind_virq bind_virq;
if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_virq(&bind_virq);
if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
case EVTCHNOP_bind_ipi: {
/* Communication between VCPUs of the current domain (virtual IPI). */
struct evtchn_bind_ipi bind_ipi;
if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_ipi(&bind_ipi);
if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
case EVTCHNOP_bind_pirq: {
/* Only dom0 and IDDs (isolated driver domains) may request PIRQs. These domains
 * cannot take physical interrupts directly: Xen accepts the PIRQ and then forwards
 * it to the domain for handling. */
struct evtchn_bind_pirq bind_pirq;
if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_pirq(&bind_pirq);
if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
case EVTCHNOP_close: {
/* Close the given port of the current domain. */
struct evtchn_close close;
if ( copy_from_guest(&close, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_close(&close);
break;
}
case EVTCHNOP_send: {
/* Used for interdomain channels and virtual IPIs. VIRQ and PIRQ channels do not need
 * it: for those, Xen itself is the sender of the notification, so no hypercall is involved. */
struct evtchn_send send;
if ( copy_from_guest(&send, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_send(current->domain, send.port);
break;
}
case EVTCHNOP_status: {
/* Query the status of a <dom, port> pair. The information returned depends on the
 * binding type (six kinds are distinguished here). */
struct evtchn_status status; // input is the <dom, port> pair whose state is queried
if ( copy_from_guest(&status, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_status(&status);
if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
rc = -EFAULT;
break;
}
case EVTCHNOP_bind_vcpu: {
/* Bind the given evtchn to the given VCPU, which will then handle its events. */
struct evtchn_bind_vcpu bind_vcpu;
if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
break;
}
case EVTCHNOP_unmask: {
/* See mask_evtchn()/unmask_evtchn() below for how a domain sets/clears the mask bit. */
struct evtchn_unmask unmask;
if ( copy_from_guest(&unmask, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_unmask(unmask.port);
break;
}
case EVTCHNOP_reset: {
/* Close all event channels of the given domain. */
struct evtchn_reset reset;
if ( copy_from_guest(&reset, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_reset(&reset);
break;
}
default:
rc = -ENOSYS;
break;
}
return rc;
}
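do_event_channel_op() and the helpers below rely on port_is_valid() and evtchn_from_port(), which are not quoted in this note. The real versions are macros over a bucketed table hanging off struct domain; the following self-contained sketch (struct evtchn_table_sketch is hypothetical, not a Xen type) only illustrates the idea of mapping a port number to its struct evtchn:

/* Illustrative only: conceptually, each valid port indexes one struct evtchn. */
struct evtchn_table_sketch {
    struct evtchn *chn[1024];   /* hypothetical flat table, one entry per port */
    unsigned int nr_ports;      /* number of ports handed out so far */
};

static int port_is_valid_sketch(struct evtchn_table_sketch *t, int port)
{
    return (port >= 0) && ((unsigned int)port < t->nr_ports) && (t->chn[port] != NULL);
}

static struct evtchn *evtchn_from_port_sketch(struct evtchn_table_sketch *t, int port)
{
    return t->chn[port];
}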
static long evtchn_status(evtchn_status_t *status)
{
struct domain *d;
domid_t dom = status->dom;
int port = status->port; // the <dom, port> being queried
struct evtchn *chn;
long rc = 0;
rc = rcu_lock_target_domain_by_id(dom, &d);
if ( rc )
return rc;
spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
rc = -EINVAL;
goto out;
}
chn = evtchn_from_port(d, port); // get the corresponding struct evtchn
rc = xsm_evtchn_status(d, chn);
if ( rc )
goto out;
switch ( chn->state )
{
case ECS_FREE:
case ECS_RESERVED:
status->status = EVTCHNSTAT_closed;
break;
case ECS_UNBOUND:
status->status = EVTCHNSTAT_unbound;
status->u.unbound.dom = chn->u.unbound.remote_domid; // report which remote domain this port is open to
break;
case ECS_INTERDOMAIN:
status->status = EVTCHNSTAT_interdomain;
status->u.interdomain.dom =
chn->u.interdomain.remote_dom->domain_id;
status->u.interdomain.port = chn->u.interdomain.remote_port; // report the peer's <dom, port>
break;
case ECS_PIRQ:
status->status = EVTCHNSTAT_pirq;
status->u.pirq = chn->u.pirq;
break;
case ECS_VIRQ:
status->status = EVTCHNSTAT_virq;
status->u.virq = chn->u.virq;
break;
case ECS_IPI:
status->status = EVTCHNSTAT_ipi;
break;
default:
BUG();
}
status->vcpu = chn->notify_vcpu_id;
out:
spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
}
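From the guest side, the same query is a single hypercall; a minimal sketch, reusing the wrapper assumed above:

/* Sketch: query the state of one of the calling domain's own ports. */
static int query_port_status_sketch(evtchn_port_t port, uint32_t *state)
{
    struct evtchn_status status = { .dom = DOMID_SELF, .port = port };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);

    if ( rc == 0 )
        *state = status.status; /* one of the EVTCHNSTAT_* values */
    return rc;
}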
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
/* Take a free evtchn (port) from the given domain and reserve it for later use by
 * the remote domain; DOMID_SELF may be passed for either side to mean the caller.
 * Then:
 *   1. set its state and remote_domid;
 *   2. write the allocated port back into the alloc argument.
 * The allocated port is typically advertised through xenstore, from where the peer
 * fetches it when it wants to bind (see the handshake sketch after this function). */
struct evtchn *chn;
struct domain *d;
int port;
domid_t dom = alloc->dom;
long rc;
rc = rcu_lock_target_domain_by_id(dom, &d);
if ( rc )
return rc;
spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 ) // take a free port from the domain
ERROR_EXIT_DOM(port, d);
chn = evtchn_from_port(d, port); // get the corresponding struct evtchn
rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
if ( rc )
goto out;
chn->state = ECS_UNBOUND; // (1) set the state to ECS_UNBOUND
if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
chn->u.unbound.remote_domid = current->domain->domain_id; // (2) set remote_domid
alloc->port = port; // (3) write the port into the evtchn_alloc_unbound_t output
out:
spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
}
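Putting the two steps together: in the usual split-driver pattern one side calls EVTCHNOP_alloc_unbound and advertises the returned port through xenstore, and the peer then calls EVTCHNOP_bind_interdomain with that port. A hedged guest-side sketch; the two xenstore helpers are hypothetical placeholders for the actual xenstore traffic:

/* Domain A: open a port to domain B and advertise it. */
static int offer_channel_sketch(domid_t peer)
{
    struct evtchn_alloc_unbound alloc = { .dom = DOMID_SELF, .remote_dom = peer };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);

    if ( rc == 0 )
        publish_port_via_xenstore(alloc.port);      /* hypothetical helper */
    return rc;
}

/* Domain B: pick up the advertised port and connect to it. */
static int accept_channel_sketch(domid_t peer, evtchn_port_t *local_port)
{
    struct evtchn_bind_interdomain bind = {
        .remote_dom  = peer,
        .remote_port = read_port_from_xenstore(),   /* hypothetical helper */
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind);

    if ( rc == 0 )
        *local_port = bind.local_port;
    return rc;
}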
/* The calling domain has already filled in bind->remote_dom and bind->remote_port
 * when issuing the hypercall. */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
struct evtchn *lchn, *rchn;
struct domain *ld = current->domain, *rd;
int lport, rport = bind->remote_port;
domid_t rdom = bind->remote_dom;
long rc;
if ( rdom == DOMID_SELF )
rdom = current->domain->domain_id;
if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
return -ESRCH;
/* Avoid deadlock by first acquiring lock of domain with smaller id. */
if ( ld < rd )
{
spin_lock(&ld->event_lock);
spin_lock(&rd->event_lock);
}
else
{
if ( ld != rd )
spin_lock(&rd->event_lock);
spin_lock(&ld->event_lock);
}
if ( (lport = get_free_port(ld)) < 0 ) // take a free local port
ERROR_EXIT(lport);
lchn = evtchn_from_port(ld, lport); // get the corresponding local evtchn
if ( !port_is_valid(rd, rport) )
ERROR_EXIT_DOM(-EINVAL, rd);
rchn = evtchn_from_port(rd, rport); // look up the remote evtchn from rd and rport
if ( (rchn->state != ECS_UNBOUND) || // the remote evtchn must be in state ECS_UNBOUND
(rchn->u.unbound.remote_domid != ld->domain_id) ) // and its remote_domid must be our own domain id
ERROR_EXIT_DOM(-EINVAL, rd); // i.e. the remote port must have been opened to us,
// which happened in EVTCHNOP_alloc_unbound
rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
if ( rc )
goto out;
lchn->u.interdomain.remote_dom = rd;
lchn->u.interdomain.remote_port = (u16)rport;
lchn->state = ECS_INTERDOMAIN; // set the local evtchn's state
rchn->u.interdomain.remote_dom = ld;
rchn->u.interdomain.remote_port = (u16)lport;
rchn->state = ECS_INTERDOMAIN; // set the remote evtchn's state
/*
* We may have lost notifications on the remote unbound port. Fix that up
* here by conservatively always setting a notification on the local port.
*/
evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport); // mark pending on the VCPU this evtchn notifies
// (notify_vcpu_id is vcpu0 by default; it is reset to 0 whenever the channel is freed, see __evtchn_close)
bind->local_port = lport; // output parameter: the local port
out:
spin_unlock(&ld->event_lock);
if ( ld != rd )
spin_unlock(&rd->event_lock);
rcu_unlock_domain(rd);
return rc;
}
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
struct evtchn *chn;
struct vcpu *v;
struct domain *d = current->domain;
int port, virq = bind->virq, vcpu = bind->vcpu;
long rc = 0;
if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
if ( virq_is_global(virq) && (vcpu != 0) ) // a global VIRQ must initially be bound to VCPU0 (it can be re-bound later via EVTCHNOP_bind_vcpu)
return -EINVAL;
if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
((v = d->vcpu[vcpu]) == NULL) ) // look up the VCPU by its id
return -ENOENT;
spin_lock(&d->event_lock);
if ( v->virq_to_evtchn[virq] != 0 ) // this VIRQ is already bound to a port
ERROR_EXIT(-EEXIST);
if ( (port = get_free_port(d)) < 0 ) // take a free port
ERROR_EXIT(port);
chn = evtchn_from_port(d, port); // get the corresponding evtchn
chn->state = ECS_VIRQ; // set state to ECS_VIRQ
chn->notify_vcpu_id = vcpu; // record in the evtchn which VCPU it is bound to
chn->u.virq = virq; // record the bound VIRQ
v->virq_to_evtchn[virq] = bind->port = port; // the port bound to a VIRQ is recorded in the vcpu's virq_to_evtchn
// (a PIRQ, by contrast, is recorded in the domain's pirq_to_evtchn)
out:
spin_unlock(&d->event_lock);
return rc;
}
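From the guest side, binding a VIRQ (e.g. VIRQ_TIMER) on a given VCPU is a single call; a minimal sketch:

/* Sketch: bind a VIRQ to a fresh event channel on the given vcpu
 * (vcpu must be 0 for a global VIRQ). */
static int bind_virq_sketch(uint32_t virq, uint32_t vcpu, evtchn_port_t *port)
{
    struct evtchn_bind_virq bind = { .virq = virq, .vcpu = vcpu };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind);

    if ( rc == 0 )
        *port = bind.port;
    return rc;
}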
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
struct evtchn *chn;
struct domain *d = current->domain; // the current domain
int port, vcpu = bind->vcpu;
long rc = 0;
if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
(d->vcpu[vcpu] == NULL) )
return -ENOENT;
spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 ) // take a free port
ERROR_EXIT(port);
chn = evtchn_from_port(d, port); // get the evtchn for this port
chn->state = ECS_IPI;
chn->notify_vcpu_id = vcpu; // an IPI binding simply sets this evtchn's notify_vcpu_id
bind->port = port; // output parameter
out:
spin_unlock(&d->event_lock);
return rc;
}
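The guest-side IPI path is symmetric: bind a channel to the target VCPU once, then poke it with EVTCHNOP_send whenever needed; a sketch:

/* Sketch: set up a virtual-IPI channel notifying <vcpu>, then send one IPI on it. */
static int ipi_sketch(uint32_t vcpu, evtchn_port_t *port)
{
    struct evtchn_bind_ipi bind = { .vcpu = vcpu };
    struct evtchn_send send;
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind);

    if ( rc )
        return rc;
    *port = bind.port;

    send.port = bind.port;
    return HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}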
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
struct evtchn *chn;
struct domain *d = current->domain;
int port, pirq = bind->pirq;
long rc;
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
if ( !irq_access_permitted(d, pirq) )
return -EPERM;
spin_lock(&d->event_lock);
if ( d->pirq_to_evtchn[pirq] != 0 )
ERROR_EXIT(-EEXIST);
if ( (port = get_free_port(d)) < 0 ) // allocate a free port
ERROR_EXIT(port);
chn = evtchn_from_port(d, port); // get the corresponding evtchn
d->pirq_to_evtchn[pirq] = port; // the port bound to a PIRQ is recorded in the domain's pirq_to_evtchn
rc = pirq_guest_bind(d->vcpu[0], pirq,
!!(bind->flags & BIND_PIRQ__WILL_SHARE));
if ( rc != 0 )
{
d->pirq_to_evtchn[pirq] = 0;
goto out;
}
chn->state = ECS_PIRQ;
chn->u.pirq = pirq; // record the bound PIRQ
bind->port = port; // output parameter
out:
spin_unlock(&d->event_lock);
return rc;
}
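A sufficiently privileged domain (dom0 or a driver domain) binds a physical IRQ the same way, passing BIND_PIRQ__WILL_SHARE when the line may be shared; a hedged sketch:

/* Sketch: bind physical IRQ <pirq> to an event channel of the calling domain. */
static int bind_pirq_sketch(uint32_t pirq, int will_share, evtchn_port_t *port)
{
    struct evtchn_bind_pirq bind = {
        .pirq  = pirq,
        .flags = will_share ? BIND_PIRQ__WILL_SHARE : 0,
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind);

    if ( rc == 0 )
        *port = bind.port;
    return rc;
}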
static long __evtchn_close(struct domain *d1, int port1)
{
struct domain *d2 = NULL;
struct vcpu *v;
struct evtchn *chn1, *chn2;
int port2;
long rc = 0;
again:
spin_lock(&d1->event_lock);
if ( !port_is_valid(d1, port1) )
{
rc = -EINVAL;
goto out;
}
chn1 = evtchn_from_port(d1, port1); // get the corresponding evtchn
/* Guest cannot close a Xen-attached event channel. */
if ( unlikely(chn1->consumer_is_xen) )
{
rc = -EINVAL;
goto out;
}
switch ( chn1->state )
{
case ECS_FREE:
case ECS_RESERVED:
rc = -EINVAL;
goto out;
case ECS_UNBOUND:
break;
case ECS_PIRQ:
pirq_guest_unbind(d1, chn1->u.pirq);
d1->pirq_to_evtchn[chn1->u.pirq] = 0;
break;
case ECS_VIRQ:
for_each_vcpu ( d1, v )
{
if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
continue;
v->virq_to_evtchn[chn1->u.virq] = 0;
spin_barrier_irq(&v->virq_lock);
}
break;
case ECS_IPI:
break;
case ECS_INTERDOMAIN:
if ( d2 == NULL )
{
d2 = chn1->u.interdomain.remote_dom;
/* If we unlock d1 then we could lose d2. Must get a reference. */
if ( unlikely(!get_domain(d2)) )
BUG();
if ( d1 < d2 )
{
spin_lock(&d2->event_lock);
}
else if ( d1 != d2 )
{
spin_unlock(&d1->event_lock);
spin_lock(&d2->event_lock);
goto again;
}
}
else if ( d2 != chn1->u.interdomain.remote_dom )
{
/*
* We can only get here if the port was closed and re-bound after
* unlocking d1 but before locking d2 above. We could retry but
* it is easier to return the same error as if we had seen the
* port in ECS_CLOSED. It must have passed through that state for
* us to end up here, so it's a valid error to return.
*/
rc = -EINVAL;
goto out;
}
port2 = chn1->u.interdomain.remote_port; // get the remote port
BUG_ON(!port_is_valid(d2, port2));
chn2 = evtchn_from_port(d2, port2); // get the corresponding remote evtchn
BUG_ON(chn2->state != ECS_INTERDOMAIN);
BUG_ON(chn2->u.interdomain.remote_dom != d1);
chn2->state = ECS_UNBOUND; // put the remote end back into its post-alloc, pre-bind state,
chn2->u.unbound.remote_domid = d1->domain_id; // i.e. ECS_UNBOUND, still open to d1
break;
default:
BUG();
}
/* Clear pending event to avoid unexpected behavior on re-bind. */
clear_bit(port1, &shared_info(d1, evtchn_pending));
/* Reset binding to vcpu0 when the channel is freed. */
chn1->state = ECS_FREE; // set the local end to ECS_FREE
chn1->notify_vcpu_id = 0; // reset the notified VCPU to vcpu0
xsm_evtchn_close_post(chn1);
out:
if ( d2 != NULL )
{
if ( d1 != d2 )
spin_unlock(&d2->event_lock);
put_domain(d2);
}
spin_unlock(&d1->event_lock);
return rc;
}
static long evtchn_close(evtchn_close_t *close)
{
return __evtchn_close(current->domain, close->port);
}
/* d is the local domain (current->domain); lport is the local port to send on. */
int evtchn_send(struct domain *d, unsigned int lport)
{
struct evtchn *lchn, *rchn;
struct domain *ld = d, *rd;
struct vcpu *rvcpu;
int rport, ret = 0;
spin_lock(&ld->event_lock);
if ( unlikely(!port_is_valid(ld, lport)) )
{
spin_unlock(&ld->event_lock);
return -EINVAL;
}
lchn = evtchn_from_port(ld, lport); // first get the local evtchn for lport
/* Guest cannot send via a Xen-attached event channel. */
if ( unlikely(lchn->consumer_is_xen) )
{
spin_unlock(&ld->event_lock);
return -EINVAL;
}
ret = xsm_evtchn_send(ld, lchn);
if ( ret )
goto out;
switch ( lchn->state )
{
case ECS_INTERDOMAIN: // interdomain communication
rd = lchn->u.interdomain.remote_dom; // get the remote/peer domain
rport = lchn->u.interdomain.remote_port; // get the remote/peer port
rchn = evtchn_from_port(rd, rport); // get the remote evtchn for rport
rvcpu = rd->vcpu[rchn->notify_vcpu_id]; // get the VCPU the peer evtchn notifies
if ( rchn->consumer_is_xen )
{
/* Xen consumers need notification only if they are blocked. */
if ( test_and_clear_bit(_VPF_blocked_in_xen,
&rvcpu->pause_flags) )
vcpu_wake(rvcpu);
}
else
{
evtchn_set_pending(rvcpu, rport); // mark an event pending on the peer VCPU's port;
} // the event handler will then run asynchronously on that VCPU
break;
case ECS_IPI:
evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport); // for an IPI, mark pending on the bound VCPU of this domain
break;
case ECS_UNBOUND:
/* silently drop the notification */
break;
default: // ECS_VIRQ and ECS_PIRQ channels never reach here:
ret = -EINVAL; // for those, Xen itself sends the notification, so no hypercall is needed
}
out:
spin_unlock(&ld->event_lock);
return ret;
}
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
/* After binding to a VCPU, events on this channel are handled by that VCPU.
 * Masking of event channels:
 *   1) masking one evtchn for all VCPUs:
 *      set the channel's mask bit, which lives in struct shared_info;
 *   2) masking all evtchns on one VCPU:
 *      done through a member of that VCPU's struct vcpu_info (described at the
 *      structure definition below). */
struct domain *d = current->domain;
struct evtchn *chn;
long rc = 0;
if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
rc = -EINVAL;
goto out;
}
chn = evtchn_from_port(d, port); // look up the evtchn from the port
/* Guest cannot re-bind a Xen-attached event channel. */
if ( unlikely(chn->consumer_is_xen) )
{
rc = -EINVAL;
goto out;
}
switch ( chn->state )
{
case ECS_VIRQ:
if ( virq_is_global(chn->u.virq) ) // only global virtual IRQs may be re-bound to another VCPU
chn->notify_vcpu_id = vcpu_id; // "binding" is nothing more than setting the evtchn's notify_vcpu_id
else
rc = -EINVAL;
break;
case ECS_UNBOUND:
case ECS_INTERDOMAIN: // after interdomain binding, the state is ECS_INTERDOMAIN
case ECS_PIRQ:
chn->notify_vcpu_id = vcpu_id;
break;
default:
rc = -EINVAL;
break;
}
out:
spin_unlock(&d->event_lock);
return rc;
}
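Re-binding an existing channel (e.g. an interdomain one) to another VCPU is again one call from the guest; a sketch:

/* Sketch: ask Xen to deliver events on <port> to <vcpu> from now on. */
static int rebind_to_vcpu_sketch(evtchn_port_t port, uint32_t vcpu)
{
    struct evtchn_bind_vcpu bind_vcpu = { .port = port, .vcpu = vcpu };
    return HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
}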
int evtchn_unmask(unsigned int port)
{
struct domain *d = current->domain;
struct vcpu *v;
spin_lock(&d->event_lock);
if ( unlikely(!port_is_valid(d, port)) )
{
spin_unlock(&d->event_lock);
return -EINVAL;
}
v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id]; // get the notified VCPU
/*
* These operations must happen in strict order. Based on
* include/xen/event.h:evtchn_set_pending().
*/
if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) && // if the mask bit was set, clear it (unmask)
test_bit (port, &shared_info(d, evtchn_pending)) && // and an event is pending on the port
!test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d), // then set this VCPU's evtchn_pending_sel bit
&vcpu_info(v, evtchn_pending_sel)) )
{
vcpu_mark_events_pending(v); // and set this VCPU's evtchn_upcall_pending
}
spin_unlock(&d->event_lock);
return 0;
}
static long evtchn_reset(evtchn_reset_t *r)
{
domid_t dom = r->dom;
struct domain *d;
int i, rc;
rc = rcu_lock_target_domain_by_id(dom, &d);
if ( rc )
return rc;
rc = xsm_evtchn_reset(current->domain, d);
if ( rc )
goto out;
for ( i = 0; port_is_valid(d, i); i++ )
(void)__evtchn_close(d, i); // this works because ports were handed out strictly in order, so iterating up to the first invalid port covers them all
rc = 0;
out:
rcu_unlock_domain(d);
return rc;
}
static int evtchn_set_pending(struct vcpu *v, int port)
{
struct domain *d = v->domain; // find the domain from the VCPU
int vcpuid;
/*
* The following bit operations must happen in strict order.
* NB. On x86, the atomic bit operations also act as memory barriers.
* There is therefore sufficiently strict ordering for this architecture --
* others may require explicit memory barriers.
*/
if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) ) // set the bit for this port in the evtchn_pending
return 1; // array of the domain's shared_info
/* The channel's pending bit is now 1, meaning this evtchn needs to be (or is being)
 * handled by some VCPU. */
if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
!test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
&vcpu_info(v, evtchn_pending_sel)) )
/* If the channel's mask bit is not set, set the corresponding bit in the VCPU's
 * evtchn_pending_sel (which indexes into evtchn_pending), so that the VCPU can
 * locate which group of ports has pending events. */
{
vcpu_mark_events_pending(v);
/* If evtchn_pending_sel was newly set, also set the VCPU's evtchn_upcall_pending,
 * indicating that this VCPU has events to handle. */
}
/* Check if some VCPU might be polling for this event. */
if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
return 0;
/* Wake any interested (or potentially interested) pollers. */
for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
vcpuid < d->max_vcpus;
vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
{
v = d->vcpu[vcpuid];
if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
test_and_clear_bit(vcpuid, d->poll_mask) )
{
v->poll_evtchn = 0;
vcpu_unblock(v);
}
}
return 0;
}
#define shared_info(d, field) __shared_info(d, (d)->shared_info, field)
#define __shared_info(d, s, field) ((s)->field)
void vcpu_mark_events_pending(struct vcpu *v)
{
int already_pending = test_and_set_bit(
0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
if ( already_pending )
return;
if ( is_hvm_vcpu(v) )
hvm_assert_evtchn_irq(v);
else
vcpu_kick(v);
}
void mask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info; // get the shared_info page
synch_set_bit(port, s->evtchn_mask); // set the channel's bit in evtchn_mask
}
EXPORT_SYMBOL_GPL(mask_evtchn);
void unmask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info; // get the shared_info page
unsigned int cpu = smp_processor_id();
vcpu_info_t *vcpu_info = &s->vcpu_info[cpu]; // vcpu_info of the current VCPU
BUG_ON(!irqs_disabled());
/* Slow path (hypercall) if this is a non-local port. */
if (unlikely(cpu != cpu_from_evtchn(port))) { // if this evtchn is not bound to the current VCPU
struct evtchn_unmask unmask = { .port = port };
VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask)); // let Xen perform the unmask via a hypercall
return;
}
synch_clear_bit(port, s->evtchn_mask); // clear the mask bit
/* Did we miss an interrupt 'edge'? Re-fire if so. */
if (synch_test_bit(port, s->evtchn_pending) && // if an event is pending on this port
!synch_test_and_set_bit(port / BITS_PER_LONG,
&vcpu_info->evtchn_pending_sel))
vcpu_info->evtchn_upcall_pending = 1; // set the VCPU's evtchn_upcall_pending
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
Structures:
struct evtchn_op {
uint32_t cmd; /* EVTCHNOP_* */
union {
struct evtchn_alloc_unbound alloc_unbound;
struct evtchn_bind_interdomain bind_interdomain;
struct evtchn_bind_virq bind_virq;
struct evtchn_bind_pirq bind_pirq;
struct evtchn_bind_ipi bind_ipi;
struct evtchn_close close;
struct evtchn_send send;
struct evtchn_status status;
struct evtchn_bind_vcpu bind_vcpu;
struct evtchn_unmask unmask;
} u;
};
typedef struct evtchn_op evtchn_op_t;
struct shared_info {
struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
unsigned long evtchn_pending[sizeof(unsigned long) * 8]; // one bit per port: 32 longs = 1024 bits on 32-bit, 64 longs = 4096 bits on 64-bit
unsigned long evtchn_mask[sizeof(unsigned long) * 8];
uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
struct arch_shared_info arch;
};
struct vcpu_info {
uint8_t evtchn_upcall_pending; // per-VCPU summary flag covering all of this VCPU's evtchns
uint8_t evtchn_upcall_mask; // set to 1 to make this VCPU mask all evtchns
unsigned long evtchn_pending_sel; // each bit covers one word (a group of 32 or 64 ports) of evtchn_pending; see evtchn_set_pending() for how it is set
struct arch_vcpu_info arch;
struct vcpu_time_info time;
}; /* 64 bytes (x86) */
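To see how the two-level pending scheme is consumed, here is a hedged sketch of a guest's event-dispatch loop, modelled on the usual evtchn_do_upcall-style code: clear evtchn_upcall_pending, atomically take evtchn_pending_sel, and for every selected word scan evtchn_pending, skipping masked ports. The handle_port_sketch() callback is a hypothetical placeholder.

/* Illustrative guest-side dispatch sketch; xchg(), __ffs() and synch_clear_bit()
 * follow the usual Linux PV style, handle_port_sketch() is assumed. */
extern void handle_port_sketch(unsigned int port); /* hypothetical per-port handler */

static void dispatch_events_sketch(shared_info_t *s, vcpu_info_t *vi)
{
    unsigned long sel, pending;
    unsigned int word, bit, port;

    vi->evtchn_upcall_pending = 0;
    sel = xchg(&vi->evtchn_pending_sel, 0);        /* atomically take the selector */

    while ( sel != 0 )
    {
        word = __ffs(sel);
        sel &= ~(1UL << word);

        pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];
        while ( pending != 0 )
        {
            bit = __ffs(pending);
            pending &= ~(1UL << bit);
            port = word * BITS_PER_LONG + bit;

            synch_clear_bit(port, s->evtchn_pending); /* acknowledge before handling */
            handle_port_sketch(port);
        }
    }
}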
/*
* EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
* accepting interdomain bindings from domain <remote_dom>. A fresh port
* is allocated in <dom> and returned as <port>.
* NOTES:
* 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
* 2. <rdom> may be DOMID_SELF, allowing loopback connections.
*/
#define EVTCHNOP_alloc_unbound 6
struct evtchn_alloc_unbound {
/* IN parameters */
domid_t dom, remote_dom;
/* OUT parameters */
evtchn_port_t port; // the port to be used for communication
};
typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
/*
* EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
* the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
* a port that is unbound and marked as accepting bindings from the calling // see EVTCHNOP_alloc_unbound
* domain. A fresh port is allocated in the calling domain and returned as
* <local_port>.
* NOTES:
* 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
#define EVTCHNOP_bind_interdomain 0
struct evtchn_bind_interdomain {
/* IN parameters. */
domid_t remote_dom;
evtchn_port_t remote_port;
/* OUT parameters. */
evtchn_port_t local_port;
};
typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
/*
* EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
* vcpu.
* NOTES:
* 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
* in xen.h for the classification of each VIRQ.
* 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
* re-bound via EVTCHNOP_bind_vcpu.
* 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
* The allocated event channel is bound to the specified vcpu and the
* binding cannot be changed.
*/
#define EVTCHNOP_bind_virq 1
struct evtchn_bind_virq {
/* IN parameters. */
uint32_t virq;
uint32_t vcpu;
/* OUT parameters. */
evtchn_port_t port;
};
typedef struct evtchn_bind_virq evtchn_bind_virq_t;
/*
* EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
* NOTES:
* 1. A physical IRQ may be bound to at most one event channel per domain.
* 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
*/
#define EVTCHNOP_bind_pirq 2
struct evtchn_bind_pirq {
/* IN parameters. */
uint32_t pirq;
#define BIND_PIRQ__WILL_SHARE 1
uint32_t flags; /* BIND_PIRQ__* */
/* OUT parameters. */
evtchn_port_t port;
};
typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
/*
* EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
* NOTES:
* 1. The allocated event channel is bound to the specified vcpu. The binding
* may not be changed.
*/
#define EVTCHNOP_bind_ipi 7
struct evtchn_bind_ipi {
uint32_t vcpu;
/* OUT parameters. */
evtchn_port_t port;
};
typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
/*
* EVTCHNOP_close: Close a local event channel <port>. If the channel is
* interdomain then the remote end is placed in the unbound state
* (EVTCHNSTAT_unbound), awaiting a new connection.
*/
#define EVTCHNOP_close 3
struct evtchn_close {
/* IN parameters. */
evtchn_port_t port;
};
typedef struct evtchn_close evtchn_close_t;
/*
* EVTCHNOP_send: Send an event to the remote end of the channel whose local
* endpoint is <port>.
*/
#define EVTCHNOP_send 4
struct evtchn_send {
/* IN parameters. */
evtchn_port_t port;
};
typedef struct evtchn_send evtchn_send_t;
/*
* EVTCHNOP_status:Get the current status of the communication channel which
* has an endpoint at <dom, port>.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may obtain the status of an event
* channel for which <dom> is not DOMID_SELF.
*/
#define EVTCHNOP_status 5
struct evtchn_status {
/* IN parameters */
domid_t dom;
evtchn_port_t port;
/* OUT parameters */
#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
uint32_t status;
uint32_t vcpu; /* VCPU to which this channel is bound. */
union {
struct {
domid_t dom;
} unbound; /* EVTCHNSTAT_unbound */ // if unbound: which domain this port has been opened to
struct {
domid_t dom;
evtchn_port_t port;
} interdomain; /* EVTCHNSTAT_interdomain */ // if interdomain: the remote domain and port at the other end
uint32_t pirq; /* EVTCHNSTAT_pirq */
uint32_t virq; /* EVTCHNSTAT_virq */
} u;
};
typedef struct evtchn_status evtchn_status_t;
/*
* EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
* event is pending.
* NOTES:
* 1. IPI-bound channels always notify the vcpu specified at bind time.
* This binding cannot be changed.
* 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
* This binding cannot be changed.
* 3. All other channels notify vcpu0 by default. This default is set when
* the channel is allocated (a port that is freed and subsequently reused
* has its binding reset to vcpu0).
*/
#define EVTCHNOP_bind_vcpu 8
struct evtchn_bind_vcpu {
/* IN parameters. */
evtchn_port_t port;
uint32_t vcpu;
};
typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
/*
* EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
* a notification to the appropriate VCPU if an event is pending.
*/
#define EVTCHNOP_unmask 9
struct evtchn_unmask {
/* IN parameters. */
evtchn_port_t port;
};
typedef struct evtchn_unmask evtchn_unmask_t;
/*
* EVTCHNOP_reset: Close all event channels associated with specified domain.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
*/
#define EVTCHNOP_reset 10
struct evtchn_reset {
/* IN parameters. */
domid_t dom;
};
typedef struct evtchn_reset evtchn_reset_t; |
|