This tutorial collects practical code examples of the C++ wait_event function (in these samples, the Linux kernel's wait_event macro); we hope you find them helpful.
This article compiles typical usages of wait_event: what it does, how to call it, and how it is used in real projects. The 29 hand-picked code examples below are sorted by popularity by default.
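Before the full examples, here is a minimal, self-contained sketch of the basic wait_event pattern: one side sleeps on a wait queue until a condition becomes true, the other side makes the condition true and wakes the queue. All names in this sketch (my_data, done, my_waiter, my_waker) are illustrative and do not come from the examples below.

#include <linux/wait.h>

struct my_data {
    wait_queue_head_t wait;   /* queue the waiter sleeps on */
    int done;                 /* condition tested by wait_event() */
};

static void my_init(struct my_data *d)
{
    init_waitqueue_head(&d->wait);
    d->done = 0;
}

/* Waiter: sleeps uninterruptibly until d->done becomes non-zero. */
static void my_waiter(struct my_data *d)
{
    wait_event(d->wait, d->done != 0);
}

/* Waker: make the condition true first, then wake any sleepers. */
static void my_waker(struct my_data *d)
{
    d->done = 1;
    wake_up(&d->wait);
}

wait_event() re-evaluates the condition every time the task is woken, so spurious wake-ups are harmless; the important rule, visible in every example below, is that the waker updates the shared state before calling wake_up().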
Example 1: ipath_modify_qp

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
    struct ipath_ibdev *dev = to_idev(ibqp->device);
    struct ipath_qp *qp = to_iqp(ibqp);
    enum ib_qp_state cur_state, new_state;
    int lastwqe = 0;
    int ret;

    spin_lock_irq(&qp->s_lock);

    cur_state = attr_mask & IB_QP_CUR_STATE ?
        attr->cur_qp_state : qp->state;
    new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

    if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
        goto inval;

    if (attr_mask & IB_QP_AV) {
        if (attr->ah_attr.dlid == 0 ||
            attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
            goto inval;

        if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
            (attr->ah_attr.grh.sgid_index > 1))
            goto inval;
    }

    if (attr_mask & IB_QP_PKEY_INDEX)
        if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
            goto inval;

    if (attr_mask & IB_QP_MIN_RNR_TIMER)
        if (attr->min_rnr_timer > 31)
            goto inval;

    if (attr_mask & IB_QP_PORT)
        if (attr->port_num == 0 ||
            attr->port_num > ibqp->device->phys_port_cnt)
            goto inval;

    /*
     * don't allow invalid Path MTU values or greater than 2048
     * unless we are configured for a 4KB MTU
     */
    if ((attr_mask & IB_QP_PATH_MTU) &&
        (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
         (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
        goto inval;

    if (attr_mask & IB_QP_PATH_MIG_STATE)
        if (attr->path_mig_state != IB_MIG_MIGRATED &&
            attr->path_mig_state != IB_MIG_REARM)
            goto inval;

    if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
        if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
            goto inval;

    switch (new_state) {
    case IB_QPS_RESET:
        if (qp->state != IB_QPS_RESET) {
            qp->state = IB_QPS_RESET;
            spin_lock(&dev->pending_lock);
            if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
            if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
            spin_unlock(&dev->pending_lock);
            qp->s_flags &= ~IPATH_S_ANY_WAIT;
            spin_unlock_irq(&qp->s_lock);
            /* Stop the sending tasklet */
            tasklet_kill(&qp->s_task);
            wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
            spin_lock_irq(&qp->s_lock);
        }
        ipath_reset_qp(qp, ibqp->qp_type);
        break;

    case IB_QPS_SQD:
        qp->s_draining = qp->s_last != qp->s_cur;
        qp->state = new_state;
        break;

    case IB_QPS_SQE:
        if (qp->ibqp.qp_type == IB_QPT_RC)
            goto inval;
        qp->state = new_state;
        break;

    case IB_QPS_ERR:
        lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
        break;

    default:
        qp->state = new_state;
        break;
    }
//......... (rest of the code omitted) .........
Developer ID: Medvedroid, Project: OT_903D-kernel-2.6.35.7, Lines of code: 101
Example 2: kbase_instr_hwcnt_disable

/**
 * @brief Disable HW counters collection
 *
 * Note: might sleep, waiting for an ongoing dump to complete
 */
mali_error kbase_instr_hwcnt_disable(struct kbase_context *kctx)
{
    unsigned long flags, pm_flags;
    mali_error err = MALI_ERROR_FUNCTION_FAILED;
    u32 irq_mask;
    struct kbase_device *kbdev;

    KBASE_DEBUG_ASSERT(NULL != kctx);
    kbdev = kctx->kbdev;
    KBASE_DEBUG_ASSERT(NULL != kbdev);

    while (1) {
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

        if (kbdev->hwcnt.state == KBASE_INSTR_STATE_DISABLED) {
            /* Instrumentation is not enabled */
            spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
            goto out;
        }

        if (kbdev->hwcnt.kctx != kctx) {
            /* Instrumentation has been setup for another context */
            spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
            goto out;
        }

        if (kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE)
            break;

        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

        /* Ongoing dump/setup - wait for its completion */
        wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
    }

    kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
    kbdev->hwcnt.triggered = 0;

    /* Disable interrupt */
    spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
    irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
                    irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
    spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

    /* Disable the counters */
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);

    kbdev->hwcnt.kctx = NULL;
    kbdev->hwcnt.addr = 0ULL;

    kbase_pm_ca_instr_disable(kbdev);

    kbase_pm_unrequest_cores(kbdev, MALI_TRUE,
        kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));

    spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

    /* Release the context. This had its own Power Manager Active reference */
    kbasep_js_release_privileged_ctx(kbdev, kctx);

    /* Also release our Power Manager Active reference */
    kbase_pm_context_idle(kbdev);

    dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);

    err = MALI_ERROR_NONE;

 out:
    return err;
}
Developer ID: ColinIanKing, Project: m576, Lines of code: 74
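Examples 2 and 3 share a recurring idiom worth isolating: the state being waited on is protected by a spinlock, and since wait_event() may sleep, the lock is dropped before waiting and re-acquired afterwards, looping until the desired state is observed. Below is a distilled sketch of that idiom; all names (my_dev, MY_BUSY, triggered) are hypothetical stand-ins for the kbase fields used above.

/* Sketch only: never call wait_event() with a spinlock held. */
static void wait_until_idle(struct my_dev *dev)
{
    unsigned long flags;

    spin_lock_irqsave(&dev->lock, flags);
    while (dev->state == MY_BUSY) {
        spin_unlock_irqrestore(&dev->lock, flags);
        /* the completion path sets dev->triggered and wakes dev->wait */
        wait_event(dev->wait, dev->triggered != 0);
        spin_lock_irqsave(&dev->lock, flags);
    }
    /* lock held again here; dev->state is no longer MY_BUSY */
    spin_unlock_irqrestore(&dev->lock, flags);
}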
Example 3: kbase_instr_hwcnt_enable_internal

STATIC mali_error kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
        struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup)
{
    unsigned long flags, pm_flags;
    mali_error err = MALI_ERROR_FUNCTION_FAILED;
    struct kbasep_js_device_data *js_devdata;
    u32 irq_mask;
    int ret;
    u64 shader_cores_needed;

    KBASE_DEBUG_ASSERT(NULL != kctx);
    KBASE_DEBUG_ASSERT(NULL != kbdev);
    KBASE_DEBUG_ASSERT(NULL != setup);
    KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);

    shader_cores_needed = kbase_pm_get_present_cores(kbdev,
                                                     KBASE_PM_CORE_SHADER);

    js_devdata = &kbdev->js_data;

    /* alignment failure */
    if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
        goto out_err;

    /* Override core availability policy to ensure all cores are available */
    kbase_pm_ca_instr_enable(kbdev);

    /* Mark the context as active so the GPU is kept turned on */
    /* A suspend won't happen here, because we're in a syscall from a
     * userspace thread. */
    kbase_pm_context_active(kbdev);

    /* Request the cores early on synchronously - we'll release them on any
     * errors (e.g. instrumentation already active) */
    kbase_pm_request_cores_sync(kbdev, MALI_TRUE, shader_cores_needed);

    spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

    if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
        /* GPU is being reset */
        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
        wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
    }

    if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
        /* Instrumentation is already enabled */
        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
        goto out_unrequest_cores;
    }

    /* Enable interrupt */
    spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
    irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
                    irq_mask | PRFCNT_SAMPLE_COMPLETED, NULL);
    spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

    /* In use, this context is the owner */
    kbdev->hwcnt.kctx = kctx;
    /* Remember the dump address so we can reprogram it later */
    kbdev->hwcnt.addr = setup->dump_buffer;
    /* Remember all the settings for suspend/resume */
    if (&kbdev->hwcnt.suspended_state != setup)
        memcpy(&kbdev->hwcnt.suspended_state, setup,
               sizeof(kbdev->hwcnt.suspended_state));

    /* Request the clean */
    kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
    kbdev->hwcnt.triggered = 0;

    /* Clean&invalidate the caches so we're sure the mmu tables for the dump
     * buffer is valid */
    ret = queue_work(kbdev->hwcnt.cache_clean_wq,
                     &kbdev->hwcnt.cache_clean_work);
    KBASE_DEBUG_ASSERT(ret);

    spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

    /* Wait for cacheclean to complete */
    wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

    KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);

    /* Schedule the context in */
    kbasep_js_schedule_privileged_ctx(kbdev, kctx);

    /* Configure */
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
                    (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) |
                    PRFCNT_CONFIG_MODE_OFF, kctx);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
                    setup->dump_buffer & 0xFFFFFFFF, kctx);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
                    setup->dump_buffer >> 32, kctx);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
                    setup->jm_bm, kctx);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
                    setup->shader_bm, kctx);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN),
                    setup->l3_cache_bm, kctx);
    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
                    setup->mmu_l2_bm, kctx);

    /* Due to PRLAM-8186 we need to disable the Tiler before we enable the
     * HW counter dump. */
    if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
        kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
    else
        kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
                        setup->tiler_bm, kctx);

    kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
                    (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) |
                    PRFCNT_CONFIG_MODE_MANUAL, kctx);

    /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump */
    if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
        kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
                        setup->tiler_bm, kctx);
//......... (rest of the code omitted) .........
Developer ID: ColinIanKing, Project: m576, Lines of code: 101
Example 4: zpios_threads_run

static int
zpios_threads_run(run_args_t *run_args)
{
    struct task_struct *tsk, **tsks;
    thread_data_t *thr = NULL;
    zpios_time_t *tt = &(run_args->stats.total_time);
    zpios_time_t *tw = &(run_args->stats.wr_time);
    zpios_time_t *tr = &(run_args->stats.rd_time);
    int i, rc = 0, tc = run_args->thread_count;

    tsks = kmem_zalloc(sizeof (struct task_struct *) * tc, KM_SLEEP);
    if (tsks == NULL) {
        rc = -ENOMEM;
        goto cleanup2;
    }

    run_args->threads = kmem_zalloc(sizeof (thread_data_t *) * tc, KM_SLEEP);
    if (run_args->threads == NULL) {
        rc = -ENOMEM;
        goto cleanup;
    }

    init_waitqueue_head(&run_args->waitq);
    run_args->threads_done = 0;

    /* Create all the needed threads which will sleep until awoken */
    for (i = 0; i < tc; i++) {
        thr = kmem_zalloc(sizeof (thread_data_t), KM_SLEEP);
        if (thr == NULL) {
            rc = -ENOMEM;
            goto taskerr;
        }

        thr->thread_no = i;
        thr->run_args = run_args;
        thr->rc = 0;
        mutex_init(&thr->lock, NULL, MUTEX_DEFAULT, NULL);
        run_args->threads[i] = thr;

        tsk = kthread_create(zpios_thread_main, (void *)thr,
                             "%s/%d", "zpios_io", i);
        if (IS_ERR(tsk)) {
            rc = -EINVAL;
            goto taskerr;
        }

        tsks[i] = tsk;
    }

    tt->start = zpios_timespec_now();

    /* Wake up all threads for write phase */
    (void) zpios_upcall(run_args->pre, PHASE_PRE_WRITE, run_args, 0);
    for (i = 0; i < tc; i++)
        wake_up_process(tsks[i]);

    /* Wait for write phase to complete */
    tw->start = zpios_timespec_now();
    wait_event(run_args->waitq, zpios_thread_done(run_args));
    tw->stop = zpios_timespec_now();
    (void) zpios_upcall(run_args->post, PHASE_POST_WRITE, run_args, rc);

    for (i = 0; i < tc; i++) {
        thr = run_args->threads[i];

        mutex_enter(&thr->lock);

        if (!rc && thr->rc)
            rc = thr->rc;

        run_args->stats.wr_data += thr->stats.wr_data;
        run_args->stats.wr_chunks += thr->stats.wr_chunks;
        mutex_exit(&thr->lock);
    }

    if (rc) {
        /* Wake up all threads and tell them to exit */
        for (i = 0; i < tc; i++) {
            mutex_enter(&thr->lock);
            thr->rc = rc;
            mutex_exit(&thr->lock);

            wake_up_process(tsks[i]);
        }

        goto out;
    }

    mutex_enter(&run_args->lock_ctl);
    ASSERT(run_args->threads_done == run_args->thread_count);
    run_args->threads_done = 0;
    mutex_exit(&run_args->lock_ctl);

    /* Wake up all threads for read phase */
    (void) zpios_upcall(run_args->pre, PHASE_PRE_READ, run_args, 0);
    for (i = 0; i < tc; i++)
        wake_up_process(tsks[i]);

    /* Wait for read phase to complete */
    tr->start = zpios_timespec_now();
    wait_event(run_args->waitq, zpios_thread_done(run_args));
//......... (rest of the code omitted) .........
Developer ID: KSreeHarsha, Project: zfscache, Lines of code: 101
Example 5: RcvMsg

asmlinkage long RcvMsg(pid_t *sender, void *msg, int *len, bool block)
{
    struct Mailbox* self;
    struct list* hashLink;
    struct Message* messages;

    spin_lock_irq(&creationLock);
    if ((self = HashFind(current->tgid)) == NULL) {
        //Allocate and initialize the mailbox for the receiver
        printk(KERN_INFO "Mailbox created via rcv for %d \n", current->tgid);
        self = kmem_cache_alloc(mailboxCache, GFP_KERNEL);
        self->owner = current->tgid;
        self->numberofMessages = 0;
        self->status = false;
        self->message = NULL;
        atomic_set(&self->references, 0);
        self->waitingFull = 0;
        self->waitingEmpty = 0;
        init_waitqueue_head(&self->canExit);
        spin_lock_init(&self->lock);
        init_waitqueue_head(&self->notEmpty);
        init_waitqueue_head(&self->notFull);

        //Allocate and initialize the hash link for the
        //receiver
        hashLink = kmem_cache_alloc(listCache, GFP_KERNEL);
        hashLink->mailbox = self;
        hashLink->pid = current->tgid;
        hashLink->next = NULL;
        HashAdd(current->tgid, hashLink);
    }
    atomic_add(1, &self->references);
    spin_unlock_irq(&creationLock);

    spin_lock_irq(&self->lock);

    // If the mailbox of the calling process is stopped, return an error
    if (self->status == true && self->numberofMessages == 0) {
        atomic_sub(1, &self->references);
        wake_up_all(&self->canExit);
        spin_unlock_irq(&self->lock);
        return MAILBOX_STOPPED;
    }

    // Number of messages in the process' mailbox is 0, and block is false,
    // so return an error
    if (self->numberofMessages == 0 && block == false) {
        atomic_sub(1, &self->references);
        wake_up_all(&self->canExit);
        spin_unlock_irq(&self->lock);
        return MAILBOX_EMPTY;
    }
    // Process' mailbox is empty and block is true, so need to wait on the
    // wait queue until a message is sent
    else if (self->numberofMessages == 0 && block == true) {
        self->waitingEmpty++;
        spin_unlock_irq(&self->lock);
        wait_event(self->notEmpty,
                   (self->numberofMessages > 0 || self->status == true));
        spin_lock_irq(&self->lock);
        self->waitingEmpty--;
        if (self->waitingEmpty == 0) {
            wake_up(&self->canExit);
        }
        if (self->status == true) {
            self->waitingEmpty--;
            atomic_sub(1, &self->references);
            wake_up_all(&self->canExit);
            spin_unlock_irq(&self->lock);
            return MAILBOX_STOPPED;
        }
    }

    // Fills the given pointers with the data that was contained in the message
    messages = self->message;
    if (copy_to_user(sender, &messages->sender, sizeof(pid_t)) ||
        copy_to_user(len, &messages->length, sizeof(int)) ||
        copy_to_user(msg, messages->msg, messages->length)) {
        atomic_sub(1, &self->references);
        wake_up_all(&self->canExit);
        spin_unlock_irq(&self->lock);
        return MSG_ARG_ERROR;
    }
    self->message = self->message->next;
    kmem_cache_free(contentCache, messages->msg);
    kmem_cache_free(messageCache, messages);
    self->numberofMessages--;
    wake_up_all(&self->notFull);
    atomic_sub(1, &self->references);
    wake_up_all(&self->canExit);
    spin_unlock_irq(&self->lock);
    return 0;
}
Developer ID: andrewwhan, Project: Old-Classwork, Lines of code: 83
Example 6: loopback_test

/**
 * Loopback Test
 */
static void loopback_test(void)
{
    int ret = 0;
    u32 read_avail = 0;
    u32 write_avail = 0;

    while (1) {
        if (test_ctx->exit_flag) {
            pr_info(MODULE_NAME ":Exit Test.\n");
            return;
        }

        pr_info(MODULE_NAME "--LOOPBACK WAIT FOR EVENT--.\n");

        /* wait for data ready event */
        wait_event(test_ctx->wait_q, test_ctx->rx_notify_count);
        test_ctx->rx_notify_count--;

        read_avail = sdio_read_avail(test_ctx->ch);
        if (read_avail == 0)
            continue;

        write_avail = sdio_write_avail(test_ctx->ch);
        if (write_avail < read_avail) {
            pr_info(MODULE_NAME ":not enough write avail.\n");
            continue;
        }

        ret = sdio_read(test_ctx->ch, test_ctx->buf, read_avail);
        if (ret) {
            pr_info(MODULE_NAME ":worker, sdio_read err=%d.\n", -ret);
            continue;
        }

        test_ctx->rx_bytes += read_avail;

        pr_debug(MODULE_NAME ":worker total rx bytes = 0x%x.\n",
                 test_ctx->rx_bytes);

        ret = sdio_write(test_ctx->ch, test_ctx->buf, read_avail);
        if (ret) {
            pr_info(MODULE_NAME ":loopback sdio_write err=%d.\n", -ret);
            continue;
        }

        test_ctx->tx_bytes += read_avail;

        pr_debug(MODULE_NAME ":loopback total tx bytes = 0x%x.\n",
                 test_ctx->tx_bytes);
    } /* end of while */
}
Developer ID: 0-t, Project: samsung-kernel-msm7x30, Lines of code: 64
Example 7: vhost_net_ubuf_put_and_wait

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
    vhost_net_ubuf_put(ubufs);
    wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
Developer ID: Chong-Li, Project: cse522, Lines of code: 5
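Example 7 is the reference-draining idiom in its purest form: drop your own reference, then sleep until every remaining holder has dropped theirs. The excerpt does not show the matching put path; a plausible sketch of it (hypothetical names, not the actual vhost implementation) decrements the count and wakes the queue when it reaches zero:

/* Hypothetical put side pairing with the wait in Example 7. */
static void my_ubuf_put(struct my_ubuf_ref *ubufs)
{
    /* the last dropper wakes anyone sleeping in ..._put_and_wait() */
    if (atomic_dec_and_test(&ubufs->refcount))
        wake_up(&ubufs->wait);
}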
Example 8: dlm_posix_lock

int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
                   int cmd, struct file_lock *fl)
{
    struct dlm_ls *ls;
    struct plock_op *op;
    struct plock_xop *xop;
    int rv;

    ls = dlm_find_lockspace_local(lockspace);
    if (!ls)
        return -EINVAL;

    xop = kzalloc(sizeof(*xop), GFP_NOFS);
    if (!xop) {
        rv = -ENOMEM;
        goto out;
    }

    op = &xop->xop;
    op->info.optype = DLM_PLOCK_OP_LOCK;
    op->info.pid = fl->fl_pid;
    op->info.ex = (fl->fl_type == F_WRLCK);
    op->info.wait = IS_SETLKW(cmd);
    op->info.fsid = ls->ls_global_id;
    op->info.number = number;
    op->info.start = fl->fl_start;
    op->info.end = fl->fl_end;
    if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
        /* fl_owner is lockd which doesn't distinguish
           processes on the nfs client */
        op->info.owner = (__u64) fl->fl_pid;
        xop->callback = fl->fl_lmops->fl_grant;
        locks_init_lock(&xop->flc);
        locks_copy_lock(&xop->flc, fl);
        xop->fl = fl;
        xop->file = file;
    } else {
        op->info.owner = (__u64)(long) fl->fl_owner;
        xop->callback = NULL;
    }

    send_op(op);

    if (xop->callback == NULL)
        wait_event(recv_wq, (op->done != 0));
    else {
        rv = FILE_LOCK_DEFERRED;
        goto out;
    }

    spin_lock(&ops_lock);
    if (!list_empty(&op->list)) {
        log_error(ls, "dlm_posix_lock: op on list %llx",
                  (unsigned long long)number);
        list_del(&op->list);
    }
    spin_unlock(&ops_lock);

    rv = op->info.rv;

    if (!rv) {
        if (posix_lock_file_wait(file, fl) < 0)
            log_error(ls, "dlm_posix_lock: vfs lock error %llx",
                      (unsigned long long)number);
    }

    kfree(xop);
out:
    dlm_put_lockspace(ls);
    return rv;
}
Developer ID: Addision, Project: LVS, Lines of code: 71
Example 9: fio_select_lock

void fio_select_lock(int module)
{
    wait_event(fio_wait, fio_check_free(module));
    __fio_select_lock(module);
}
Developer ID: ShawnOfMisfit, Project: ambarella, Lines of code: 5
Example 10: dlm_posix_get

int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
                  struct file_lock *fl)
{
    struct dlm_ls *ls;
    struct plock_op *op;
    int rv;

    ls = dlm_find_lockspace_local(lockspace);
    if (!ls)
        return -EINVAL;

    op = kzalloc(sizeof(*op), GFP_NOFS);
    if (!op) {
        rv = -ENOMEM;
        goto out;
    }

    op->info.optype = DLM_PLOCK_OP_GET;
    op->info.pid = fl->fl_pid;
    op->info.ex = (fl->fl_type == F_WRLCK);
    op->info.fsid = ls->ls_global_id;
    op->info.number = number;
    op->info.start = fl->fl_start;
    op->info.end = fl->fl_end;
    if (fl->fl_lmops && fl->fl_lmops->fl_grant)
        op->info.owner = (__u64) fl->fl_pid;
    else
        op->info.owner = (__u64)(long) fl->fl_owner;

    send_op(op);
    wait_event(recv_wq, (op->done != 0));

    spin_lock(&ops_lock);
    if (!list_empty(&op->list)) {
        log_error(ls, "dlm_posix_get: op on list %llx",
                  (unsigned long long)number);
        list_del(&op->list);
    }
    spin_unlock(&ops_lock);

    /* info.rv from userspace is 1 for conflict, 0 for no-conflict,
       -ENOENT if there are no locks on the file */

    rv = op->info.rv;

    fl->fl_type = F_UNLCK;
    if (rv == -ENOENT)
        rv = 0;
    else if (rv > 0) {
        locks_init_lock(fl);
        fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
        fl->fl_flags = FL_POSIX;
        fl->fl_pid = op->info.pid;
        fl->fl_start = op->info.start;
        fl->fl_end = op->info.end;
        rv = 0;
    }

    kfree(op);
out:
    dlm_put_lockspace(ls);
    return rv;
}
Developer ID: Addision, Project: LVS, Lines of code: 63
Example 11: grmnet_ctrl_smd_read_w

static void grmnet_ctrl_smd_read_w(struct work_struct *w)
{
    struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w);
    struct rmnet_ctrl_port *port = c->port;
    int sz, total_received, read_avail;
    int len;
    void *buf;
    unsigned long flags;

    spin_lock_irqsave(&port->port_lock, flags);
    while (c->ch) {
        sz = smd_cur_packet_size(c->ch);
        if (sz <= 0)
            break;

        spin_unlock_irqrestore(&port->port_lock, flags);

        buf = kmalloc(sz, GFP_KERNEL);
        if (!buf)
            return;

        total_received = 0;

        while (total_received < sz) {
            wait_event(c->smd_wait_q,
                       ((read_avail = smd_read_avail(c->ch)) ||
                        (c->ch == 0)));
            if (read_avail < 0 || c->ch == 0) {
                pr_err("%s:smd read_avail failure:%d or channel closed ch=%p",
                       __func__, read_avail, c->ch);
                kfree(buf);
                return;
            }

            if (read_avail + total_received > sz) {
                pr_err("%s: SMD sending incorrect pkt\n", __func__);
                kfree(buf);
                return;
            }

            len = smd_read(c->ch, buf + total_received, read_avail);
            if (len <= 0) {
                pr_err("%s: smd read failure %d\n", __func__, len);
                kfree(buf);
                return;
            }
            total_received += len;
        }

        /* send it to USB here */
        spin_lock_irqsave(&port->port_lock, flags);
        if (port->port_usb && port->port_usb->send_cpkt_response) {
            port->port_usb->send_cpkt_response(port->port_usb, buf, sz);
            c->to_host++;
        }
        kfree(buf);
    }
    spin_unlock_irqrestore(&port->port_lock, flags);
}
Developer ID: 98416, Project: Z7Max_NX505J_H129_kernel, Lines of code: 62
Example 12: freeze_enter

static void freeze_enter(void)
{
    wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
}
Developer ID: eckucukoglu, Project: sober-kernel, Lines of code: 4
Example 13: ext4_ioend_wait

void ext4_ioend_wait(struct inode *inode)
{
    wait_queue_head_t *wq = ext4_ioend_wq(inode);

    wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}
Developer ID: mb3dot, Project: community-b3-kernel, Lines of code: 6
Example 14: nfs_do_call_unlink

static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir,
                              struct nfs_unlinkdata *data)
{
    struct rpc_message msg = {
        .rpc_argp = &data->args,
        .rpc_resp = &data->res,
        .rpc_cred = data->cred,
    };
    struct rpc_task_setup task_setup_data = {
        .rpc_message = &msg,
        .callback_ops = &nfs_unlink_ops,
        .callback_data = data,
        .workqueue = nfsiod_workqueue,
        .flags = RPC_TASK_ASYNC,
    };
    struct rpc_task *task;
    struct dentry *alias;

    alias = d_lookup(parent, &data->args.name);
    if (alias != NULL) {
        int ret;
        void *devname_garbage = NULL;

        /*
         * Hey, we raced with lookup... See if we need to transfer
         * the sillyrename information to the aliased dentry.
         */
        nfs_free_dname(data);
        ret = nfs_copy_dname(alias, data);
        spin_lock(&alias->d_lock);
        if (ret == 0 && alias->d_inode != NULL &&
            !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
            devname_garbage = alias->d_fsdata;
            alias->d_fsdata = data;
            alias->d_flags |= DCACHE_NFSFS_RENAMED;
            ret = 1;
        } else
            ret = 0;
        spin_unlock(&alias->d_lock);
        nfs_dec_sillycount(dir);
        dput(alias);
        /*
         * If we'd displaced old cached devname, free it. At that
         * point dentry is definitely not a root, so we won't need
         * that anymore.
         */
        kfree(devname_garbage);
        return ret;
    }
    data->dir = igrab(dir);
    if (!data->dir) {
        nfs_dec_sillycount(dir);
        return 0;
    }
    nfs_sb_active(dir->i_sb);
    data->args.fh = NFS_FH(dir);
    nfs_fattr_init(data->res.dir_attr);

    NFS_PROTO(dir)->unlink_setup(&msg, dir);

    task_setup_data.rpc_client = NFS_CLIENT(dir);
    task = rpc_run_task(&task_setup_data);
    if (!IS_ERR(task))
        rpc_put_task_async(task);
    return 1;
}

static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
{
    struct dentry *parent;
    struct inode *dir;
    int ret = 0;

    parent = dget_parent(dentry);
    if (parent == NULL)
        goto out_free;
    dir = parent->d_inode;
    /* Non-exclusive lock protects against concurrent lookup() calls */
    spin_lock(&dir->i_lock);
    if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
        /* Deferred delete */
        hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
        spin_unlock(&dir->i_lock);
        ret = 1;
        goto out_dput;
    }
    spin_unlock(&dir->i_lock);
    ret = nfs_do_call_unlink(parent, dir, data);
out_dput:
    dput(parent);
out_free:
    return ret;
}

void nfs_block_sillyrename(struct dentry *dentry)
{
    struct nfs_inode *nfsi = NFS_I(dentry->d_inode);

    wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
}
//......... (rest of the code omitted) .........
Developer ID: 0xroot, Project: Blackphone-BP1-Kernel, Lines of code: 101
Example 15: ptrace_attach

static int ptrace_attach(struct task_struct *task)
{
    bool wait_trap = false;
    int retval;

    audit_ptrace(task);

    retval = -EPERM;
    if (unlikely(task->flags & PF_KTHREAD))
        goto out;
    if (same_thread_group(task, current))
        goto out;

    /*
     * Protect exec's credential calculations against our interference;
     * interference; SUID, SGID and LSM creds get determined differently
     * under ptrace.
     */
    retval = -ERESTARTNOINTR;
    if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
        goto out;

    task_lock(task);
    retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
    task_unlock(task);
    if (retval)
        goto unlock_creds;

    write_lock_irq(&tasklist_lock);
    retval = -EPERM;
    if (unlikely(task->exit_state))
        goto unlock_tasklist;
    if (task->ptrace)
        goto unlock_tasklist;

    task->ptrace = PT_PTRACED;
    if (task_ns_capable(task, CAP_SYS_PTRACE))
        task->ptrace |= PT_PTRACE_CAP;

    __ptrace_link(task, current);
    send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

    spin_lock(&task->sighand->siglock);

    /*
     * If the task is already STOPPED, set GROUP_STOP_PENDING and
     * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
     * will be cleared if the child completes the transition or any
     * event which clears the group stop states happens. We'll wait
     * for the transition to complete before returning from this
     * function.
     *
     * This hides STOPPED -> RUNNING -> TRACED transition from the
     * attaching thread but a different thread in the same group can
     * still observe the transient RUNNING state. IOW, if another
     * thread's WNOHANG wait(2) on the stopped tracee races against
     * ATTACH, the wait(2) may fail due to the transient RUNNING.
     *
     * The following task_is_stopped() test is safe as both transitions
     * in and out of STOPPED are protected by siglock.
     */
    if (task_is_stopped(task)) {
        task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
        signal_wake_up(task, 1);
        wait_trap = true;
    }

    spin_unlock(&task->sighand->siglock);

    retval = 0;
unlock_tasklist:
    write_unlock_irq(&tasklist_lock);
unlock_creds:
    mutex_unlock(&task->signal->cred_guard_mutex);
out:
    if (wait_trap)
        wait_event(current->signal->wait_chldexit,
                   !(task->group_stop & GROUP_STOP_TRAPPING));

    return retval;
}
Developer ID: 1yankeedt, Project: D710BST_FL24_Kernel, Lines of code: 80
Example 16: sender_test

/**
 * sender Test
 */
static void sender_test(void)
{
    int ret = 0;
    u32 read_avail = 0;
    u32 write_avail = 0;
    int packet_count = 0;
    int size = 512;
    u16 *buf16 = (u16 *) test_ctx->buf;
    int i;

    for (i = 0; i < size / 2; i++)
        buf16[i] = (u16) (i & 0xFFFF);

    sdio_set_write_threshold(test_ctx->ch, 4*1024);
    sdio_set_read_threshold(test_ctx->ch, 16*1024); /* N/A with Rx EOT */
    sdio_set_poll_time(test_ctx->ch, 0); /* N/A with Rx EOT */

    while (packet_count < 100) {
        if (test_ctx->exit_flag) {
            pr_info(MODULE_NAME ":Exit Test.\n");
            return;
        }

        pr_info(MODULE_NAME "--SENDER WAIT FOR EVENT--.\n");

        /* wait for data ready event */
        write_avail = sdio_write_avail(test_ctx->ch);
        pr_debug(MODULE_NAME ":write_avail=%d\n", write_avail);
        if (write_avail < size) {
            wait_event(test_ctx->wait_q, test_ctx->tx_notify_count);
            test_ctx->tx_notify_count--;
        }

        write_avail = sdio_write_avail(test_ctx->ch);
        pr_debug(MODULE_NAME ":write_avail=%d\n", write_avail);
        if (write_avail < size) {
            pr_info(MODULE_NAME ":not enough write avail.\n");
            continue;
        }

        test_ctx->buf[0] = packet_count;
        test_ctx->buf[(size/4)-1] = packet_count;

        ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
        if (ret) {
            pr_info(MODULE_NAME ":sender sdio_write err=%d.\n", -ret);
            goto exit_err;
        }

        /* wait for read data ready event */
        pr_debug(MODULE_NAME ":sender wait for rx data.\n");
        read_avail = sdio_read_avail(test_ctx->ch);
        wait_event(test_ctx->wait_q, test_ctx->rx_notify_count);
        test_ctx->rx_notify_count--;

        read_avail = sdio_read_avail(test_ctx->ch);

        if (read_avail != size) {
            pr_info(MODULE_NAME ":read_avail size %d not as expected.\n",
                    read_avail);
            goto exit_err;
        }

        memset(test_ctx->buf, 0x00, size);

        ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
        if (ret) {
            pr_info(MODULE_NAME ":sender sdio_read err=%d.\n", -ret);
            goto exit_err;
        }

        if ((test_ctx->buf[0] != packet_count) ||
            (test_ctx->buf[(size/4)-1] != packet_count)) {
            pr_info(MODULE_NAME ":sender sdio_read WRONG DATA.\n");
            goto exit_err;
        }

        test_ctx->tx_bytes += size;
        test_ctx->rx_bytes += size;
        packet_count++;

        pr_debug(MODULE_NAME ":sender total rx bytes = 0x%x , packet#=%d.\n",
                 test_ctx->rx_bytes, packet_count);
        pr_debug(MODULE_NAME ":sender total tx bytes = 0x%x , packet#=%d.\n",
                 test_ctx->tx_bytes, packet_count);
    } /* end of while */

    sdio_close(test_ctx->ch);
//......... (rest of the code omitted) .........
Developer ID: 0-t, Project: samsung-kernel-msm7x30, Lines of code: 101
Example 17: user_wait_on_blocked_lock

static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
{
    wait_event(lockres->l_event,
               !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
}
Developer ID: johnny, Project: CobraDroidBeta, Lines of code: 6
Example 18: unregister_unifi_sdio

/*
 * ---------------------------------------------------------------------------
 * unregister_unifi_sdio
 *
 *      Call from SDIO driver when it detects that UniFi has been removed.
 *
 * Arguments:
 *      bus_id          Number of the card that was ejected.
 *
 * Returns:
 *      None.
 * ---------------------------------------------------------------------------
 */
static void
unregister_unifi_sdio(int bus_id)
{
    unifi_priv_t *priv;
    int interfaceTag = 0;
    u8 reason = CONFIG_IND_EXIT;

    if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
        unifi_error(NULL, "unregister_unifi_sdio: invalid device %d\n",
                    bus_id);
        return;
    }

    priv = Unifi_instances[bus_id];
    if (priv == NULL) {
        unifi_error(priv, "unregister_unifi_sdio: device %d is not registered\n",
                    bus_id);
        return;
    }

    /* Stop the network traffic before freeing the core. */
    for (interfaceTag = 0; interfaceTag < priv->totalInterfaceCount; interfaceTag++) {
        netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
        if (interfacePriv->netdev_registered) {
            netif_carrier_off(priv->netdev[interfaceTag]);
            netif_tx_stop_all_queues(priv->netdev[interfaceTag]);
        }
    }

#ifdef CSR_NATIVE_LINUX
    /*
     * If the unifi thread was started, signal it to stop. This
     * should cause any userspace processes with open unifi device to
     * close them.
     */
    uf_stop_thread(priv, &priv->bh_thread);

    /* Unregister the interrupt handler */
    if (csr_sdio_linux_remove_irq(priv->sdio)) {
        unifi_notice(priv,
                     "csr_sdio_linux_remove_irq failed to talk to card.\n");
    }

    /* Ensure no MLME functions are waiting on a the mlme_event semaphore. */
    uf_abort_mlme(priv);
#endif /* CSR_NATIVE_LINUX */

    ul_log_config_ind(priv, &reason, sizeof(u8));

    /* Deregister the UDI hook from the core. */
    unifi_remove_udi_hook(priv->card, logging_handler);

    uf_put_instance(bus_id);

    /*
     * Wait until the device is cleaned up. i.e., when all userspace
     * processes have closed any open unifi devices.
     */
    wait_event(Unifi_cleanup_wq, In_use[bus_id] == UNIFI_DEV_CLEANUP);
    unifi_trace(NULL, UDBG5, "Received clean up event\n");

    /* Now we can free the private context and the char device nodes */
    cleanup_unifi_sdio(priv);

} /* unregister_unifi_sdio() */
Developer ID: wpwrak, Project: ben-wpan-linux, Lines of code: 80
Example 19: ufs_test_run_data_integrity_test

static int ufs_test_run_data_integrity_test(struct test_iosched *test_iosched)
{
    int ret = 0;
    int i, j;
    unsigned int start_sec, num_bios, retries = NUM_UNLUCKY_RETRIES;
    struct request_queue *q = test_iosched->req_q;
    int sectors[QUEUE_MAX_REQUESTS] = {0};
    struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

    start_sec = test_iosched->start_sector;
    utd->queue_complete = false;

    if (utd->random_test_seed != 0) {
        ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
    } else {
        num_bios = DEFAULT_NUM_OF_BIOS;
        utd->random_test_seed = MAGIC_SEED;
    }

    /* Adding write requests */
    pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
            QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);

    for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
        /* make sure that we didn't draw the same start_sector twice */
        while (retries--) {
            pseudo_rnd_sector_and_size(&utd->random_test_seed,
                test_iosched->start_sector, &start_sec, &num_bios);
            sectors[i] = start_sec;
            for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
                /* just increment j */;
            if (j == i)
                break;
        }
        if (!retries) {
            pr_err("%s: too many unlucky start_sector draw retries",
                   __func__);
            ret = -EINVAL;
            return ret;
        }
        retries = NUM_UNLUCKY_RETRIES;

        ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE,
                start_sec, 1, i, long_test_free_end_io_fn);

        if (ret) {
            pr_err("%s: failed to add a write request", __func__);
            return ret;
        }
    }

    /* waiting for the write request to finish */
    blk_post_runtime_resume(q, 0);
    wait_event(utd->wait_q, utd->queue_complete);

    /* Adding read requests */
    pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
            QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);

    for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
        ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ,
                sectors[i], 1, i, long_test_free_end_io_fn);

        if (ret) {
            pr_err("%s: failed to add a read request", __func__);
            return ret;
        }
    }

    blk_post_runtime_resume(q, 0);
    return ret;
}
Developer ID: youyim, Project: Z9Max_NX510J_V1_kernel, Lines of code: 73
Example 20: tcm_loop_device_reset

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
    struct se_cmd *se_cmd = NULL;
    struct se_portal_group *se_tpg;
    struct se_session *se_sess;
    struct tcm_loop_cmd *tl_cmd = NULL;
    struct tcm_loop_hba *tl_hba;
    struct tcm_loop_nexus *tl_nexus;
    struct tcm_loop_tmr *tl_tmr = NULL;
    struct tcm_loop_tpg *tl_tpg;
    int ret = FAILED;

    /*
     * Locate the tcm_loop_hba_t pointer
     */
    tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
    /*
     * Locate the tl_nexus and se_sess pointers
     */
    tl_nexus = tl_hba->tl_nexus;
    if (!tl_nexus) {
        pr_err("Unable to perform device reset without"
               " active I_T Nexus\n");
        return FAILED;
    }
    se_sess = tl_nexus->se_sess;
    /*
     * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
     */
    tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
    se_tpg = &tl_tpg->tl_se_tpg;

    tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
    if (!tl_cmd) {
        pr_err("Unable to allocate memory for tl_cmd\n");
        return FAILED;
    }

    tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
    if (!tl_tmr) {
        pr_err("Unable to allocate memory for tl_tmr\n");
        goto release;
    }
    init_waitqueue_head(&tl_tmr->tl_tmr_wait);

    se_cmd = &tl_cmd->tl_se_cmd;
    /*
     * Initialize struct se_cmd descriptor from target_core_mod infrastructure
     */
    transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
                          DMA_NONE, MSG_SIMPLE_TAG,
                          &tl_cmd->tl_sense_buf[0]);
    /*
     * Allocate the LUN_RESET TMR
     */
    se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
                                            TMR_LUN_RESET, GFP_KERNEL);
    if (IS_ERR(se_cmd->se_tmr_req))
        goto release;
    /*
     * Locate the underlying TCM struct se_lun from sc->device->lun
     */
    if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
        goto release;
    /*
     * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
     * to wake us up.
     */
    transport_generic_handle_tmr(se_cmd);
    wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
    /*
     * The TMR LUN_RESET has completed, check the response status and
     * then release allocations.
     */
    ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
        SUCCESS : FAILED;
release:
    if (se_cmd)
        transport_generic_free_cmd(se_cmd, 1);
    else
        kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
    kfree(tl_tmr);
    return ret;
}
Developer ID: GerardGarcia, Project: linux, Lines of code: 87
Example 21: start_usb_playback

static int start_usb_playback(struct ua101 *ua)
{
    unsigned int i, frames;
    struct urb *urb;
    int err = 0;

    if (test_bit(DISCONNECTED, &ua->states))
        return -ENODEV;

    if (test_bit(USB_PLAYBACK_RUNNING, &ua->states))
        return 0;

    kill_stream_urbs(&ua->playback);
    tasklet_kill(&ua->playback_tasklet);

    err = enable_iso_interface(ua, INTF_PLAYBACK);
    if (err < 0)
        return err;

    clear_bit(PLAYBACK_URB_COMPLETED, &ua->states);
    ua->playback.urbs[0]->urb.complete = first_playback_urb_complete;
    spin_lock_irq(&ua->lock);
    INIT_LIST_HEAD(&ua->ready_playback_urbs);
    spin_unlock_irq(&ua->lock);

    /*
     * We submit the initial URBs all at once, so we have to wait for the
     * packet size FIFO to be full.
     */
    wait_event(ua->rate_feedback_wait,
               ua->rate_feedback_count >= ua->playback.queue_length ||
               !test_bit(USB_CAPTURE_RUNNING, &ua->states) ||
               test_bit(DISCONNECTED, &ua->states));
    if (test_bit(DISCONNECTED, &ua->states)) {
        stop_usb_playback(ua);
        return -ENODEV;
    }
    if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
        stop_usb_playback(ua);
        return -EIO;
    }

    for (i = 0; i < ua->playback.queue_length; ++i) {
        /* all initial URBs contain silence */
        spin_lock_irq(&ua->lock);
        frames = ua->rate_feedback[ua->rate_feedback_start];
        add_with_wraparound(ua, &ua->rate_feedback_start, 1);
        ua->rate_feedback_count--;
        spin_unlock_irq(&ua->lock);
        urb = &ua->playback.urbs[i]->urb;
        urb->iso_frame_desc[0].length =
            frames * ua->playback.frame_bytes;
        memset(urb->transfer_buffer, 0,
               urb->iso_frame_desc[0].length);
    }

    set_bit(USB_PLAYBACK_RUNNING, &ua->states);
    err = submit_stream_urbs(ua, &ua->playback);
    if (err < 0)
        stop_usb_playback(ua);
    return err;
}
Developer ID: 3null, Project: linux, Lines of code: 63
Example 22: SendMsg

asmlinkage long SendMsg(pid_t dest, void* msg, int len, bool block)
{
    struct Mailbox* receiver;
    struct list* hashLink;
    struct Message* sendingMessage;
    struct Message* messages;
    int pidstatus;

    spin_lock_irq(&creationLock);
    pidstatus = verifyPID(dest);
    if (pidstatus == PIDDNE) {
        spin_unlock_irq(&creationLock);
        return MAILBOX_INVALID;
    }
    else if (pidstatus == KERNPID) {
        if (HashFind(dest) == NULL) {
            hashLink = kmem_cache_alloc(listCache, GFP_KERNEL);
            hashLink->mailbox = NULL;
            hashLink->next = NULL;
            HashAdd(dest, hashLink);
        }
        spin_unlock_irq(&creationLock);
        return MAILBOX_INVALID;
    }
    if ((receiver = HashFind(dest)) == NULL) {
        //Allocate and initialize the mailbox for the receiver
        printk(KERN_INFO "Mailbox created via send for %d \n", dest);
        receiver = kmem_cache_alloc(mailboxCache, GFP_KERNEL);
        receiver->owner = dest;
        receiver->numberofMessages = 0;
        receiver->status = false;
        receiver->message = NULL;
        atomic_set(&receiver->references, 0);
        receiver->waitingFull = 0;
        receiver->waitingEmpty = 0;
        init_waitqueue_head(&receiver->canExit);
        spin_lock_init(&receiver->lock);
        init_waitqueue_head(&receiver->notEmpty);
        init_waitqueue_head(&receiver->notFull);
        //Allocate and initialize the hash link for the receiver
        hashLink = kmem_cache_alloc(listCache, GFP_KERNEL);
        hashLink->mailbox = receiver;
        hashLink->pid = dest;
        hashLink->next = NULL;
        HashAdd(dest, hashLink);
    }
    if (atomic_read(&receiver->references) == -1) {
        spin_unlock_irq(&creationLock);
        return MAILBOX_ERROR;
    }
    atomic_add(1, &receiver->references);
    spin_unlock_irq(&creationLock);

    spin_lock_irq(&receiver->lock);

    //If the message length is greater than the max or less than zero, it
    //returns the message length error
    if (len > MAX_MSG_SIZE || len < 0) {
        atomic_sub(1, &receiver->references);
        wake_up_all(&receiver->canExit);
        spin_unlock_irq(&receiver->lock);
        return MSG_LENGTH_ERROR;
    }
    //If the receiver's mailbox is stopped, it will return an error
    if (receiver->status == true) {
        atomic_sub(1, &receiver->references);
        wake_up_all(&receiver->canExit);
        spin_unlock_irq(&receiver->lock);
        return MAILBOX_STOPPED;
    }
    spin_unlock_irq(&receiver->lock);

    //Allocates and initializes the message to be sent
    sendingMessage = kmem_cache_alloc(messageCache, GFP_KERNEL);
    sendingMessage->sender = current->tgid;
    sendingMessage->length = len;
    sendingMessage->msg = kmem_cache_alloc(contentCache, GFP_KERNEL);
    copy_from_user(sendingMessage->msg, msg, len);
    sendingMessage->next = NULL;

    spin_lock_irq(&receiver->lock);

    // Mailbox is full, and block is set to false, need to return an error
    if (receiver->numberofMessages == 32 && block == false) {
        atomic_sub(1, &receiver->references);
        wake_up_all(&receiver->canExit);
        spin_unlock_irq(&receiver->lock);
        return MAILBOX_FULL;
    }
    // Mailbox is full, and block is true, so it needs to wait on the queue
    else if (receiver->numberofMessages == 32 && block == true) {
        receiver->waitingFull++;
        spin_unlock_irq(&receiver->lock);
        wait_event(receiver->notFull,
                   (receiver->numberofMessages < 32 ||
                    receiver->status == true));
        spin_lock_irq(&receiver->lock);
        receiver->waitingFull--;
        if (receiver->waitingFull == 0) {
            wake_up_all(&receiver->canExit);
        }
        if (receiver->status == true) {
            atomic_sub(1, &receiver->references);
            wake_up_all(&receiver->canExit);
            spin_unlock_irq(&receiver->lock);
            return MAILBOX_STOPPED;
        }
    }
    messages = receiver->message;
    // If the mailbox is empty, the current message being sent becomes the
    // head of the receivers message list
//......... (rest of the code omitted) .........
Developer ID: andrewwhan, Project: Old-Classwork, Lines of code: 101
Example 23: wait_for_device_probe

void wait_for_device_probe(void)
{
    /* wait for the known devices to complete their probing */
    wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
    async_synchronize_full();
}
Developer ID: Medvedroid, Project: OT_903D-kernel-2.6.35.7, Lines of code: 6
Example 24: ext4_aiodio_wait

static void ext4_aiodio_wait(struct inode *inode)
{
    wait_queue_head_t *wq = ext4_ioend_wq(inode);

    wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
}
Developer ID: rrowicki, Project: Chrono_Kernel-1, Lines of code: 6
Example 25: xenbus_thread_func

static void xenbus_thread_func(void *ign)
{
    struct xsd_sockmsg msg;
    unsigned prod = xenstore_buf->rsp_prod;

    for (;;) {
        wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
        while (1) {
            prod = xenstore_buf->rsp_prod;
            DEBUG("Rsp_cons %d, rsp_prod %d.\n",
                  xenstore_buf->rsp_cons, xenstore_buf->rsp_prod);
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons < sizeof(msg))
                break;
            rmb();
            memcpy_from_ring(xenstore_buf->rsp, &msg,
                             MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                             sizeof(msg));
            DEBUG("Msg len %d, %d avail, id %d.\n",
                  msg.len + sizeof(msg),
                  xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
                  msg.req_id);
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons <
                sizeof(msg) + msg.len)
                break;

            DEBUG("Message is good.\n");

            if (msg.type == XS_WATCH_EVENT) {
                struct xenbus_event *event = malloc(sizeof(*event) + msg.len);
                xenbus_event_queue *events = NULL;
                char *data = (char*)event + sizeof(*event);
                struct watch *watch;

                memcpy_from_ring(xenstore_buf->rsp, data,
                                 MASK_XENSTORE_IDX(xenstore_buf->rsp_cons + sizeof(msg)),
                                 msg.len);

                event->path = data;
                event->token = event->path + strlen(event->path) + 1;

                xenstore_buf->rsp_cons += msg.len + sizeof(msg);

                for (watch = watches; watch; watch = watch->next)
                    if (!strcmp(watch->token, event->token)) {
                        events = watch->events;
                        break;
                    }

                if (events) {
                    event->next = *events;
                    *events = event;
                    wake_up(&xenbus_watch_queue);
                } else {
                    printk("unexpected watch token %s\n", event->token);
                    free(event);
                }
            } else {
                req_info[msg.req_id].reply = malloc(sizeof(msg) + msg.len);
                memcpy_from_ring(xenstore_buf->rsp,
                                 req_info[msg.req_id].reply,
                                 MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                                 msg.len + sizeof(msg));
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
                wake_up(&req_info[msg.req_id].waitq);
            }
        }
    }
}
Developer ID: CPFL, Project: gxen, Lines of code: 76
Example 26: kbase_instr_hwcnt_dump

/**
 * @brief Issue Dump command to hardware and wait for completion
 */
mali_error kbase_instr_hwcnt_dump(struct kbase_context *kctx)
{
    unsigned long flags;
    mali_error err = MALI_ERROR_FUNCTION_FAILED;
    struct kbase_device *kbdev;

#ifdef SEC_HWCNT
    if (kctx == NULL) {
        GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
                "kctx is NULL error in %s %d \n", __FUNCTION__, err);
        goto out;
    }
#endif

    KBASE_DEBUG_ASSERT(NULL != kctx);
    kbdev = kctx->kbdev;
    KBASE_DEBUG_ASSERT(NULL != kbdev);

    err = kbase_instr_hwcnt_dump_irq(kctx);
    if (MALI_ERROR_NONE != err) {
        /* Can't dump HW counters */
        GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u,
                "kbase_instr_hwcnt_dump_irq error in %s %d \n",
                __FUNCTION__, err);
        goto out;
    }

    /* Wait for dump & cacheclean to complete */
#ifdef SEC_HWCNT
    if (kbdev->hwcnt.is_init) {
        int ret = wait_event_timeout(kbdev->hwcnt.wait,
                                     kbdev->hwcnt.triggered != 0,
                                     kbdev->hwcnt.timeout);
        if ((kbdev->hwcnt.trig_exception == 1) || (ret == 0)) {
            kbdev->hwcnt.trig_exception = 0;
            kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
            err = MALI_ERROR_FUNCTION_FAILED;
            GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
                    "wait_event_timeout error in %s %d \n",
                    __FUNCTION__, err);
            goto out;
        }
    } else
#endif
    wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

    spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

    if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
        /* GPU is being reset */
        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
#ifdef SEC_HWCNT
        if (kbdev->hwcnt.is_init)
            wait_event_timeout(kbdev->hwcnt.wait,
                               kbdev->hwcnt.triggered != 0,
                               kbdev->hwcnt.timeout);
        else
#endif
        wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
    }

    if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
        err = MALI_ERROR_FUNCTION_FAILED;
        kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
        GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
                "hwcnt state is FAULT error in %s %d \n",
                __FUNCTION__, err);
    } else {
        /* Dump done */
        KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
        err = MALI_ERROR_NONE;
    }

    spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

 out:
    return err;
}
Developer ID: ColinIanKing, Project: m576, Lines of code: 70
Example 27: xb_write

/* Send data to xenbus. This can block. All of the requests are seen
   by xenbus as if sent atomically. The header is added automatically,
   using type %type, req_id %req_id, and trans_id %trans_id. */
static void xb_write(int type, int req_id, xenbus_transaction_t trans_id,
                     const struct write_req *req, int nr_reqs)
{
    XENSTORE_RING_IDX prod;
    int r;
    int len = 0;
    const struct write_req *cur_req;
    int req_off;
    int total_off;
    int this_chunk;
    struct xsd_sockmsg m = {.type = type, .req_id = req_id,
                            .tx_id = trans_id };
    struct write_req header_req = { &m, sizeof(m) };

    for (r = 0; r < nr_reqs; r++)
        len += req[r].len;
    m.len = len;
    len += sizeof(m);

    cur_req = &header_req;

    BUG_ON(len > XENSTORE_RING_SIZE);
    /* Wait for the ring to drain to the point where we can send the
       message. */
    prod = xenstore_buf->req_prod;
    if (prod + len - xenstore_buf->req_cons > XENSTORE_RING_SIZE) {
        /* Wait for there to be space on the ring */
        DEBUG("prod %d, len %d, cons %d, size %d; waiting.\n",
              prod, len, xenstore_buf->req_cons, XENSTORE_RING_SIZE);
        wait_event(xb_waitq,
                   xenstore_buf->req_prod + len -
                   xenstore_buf->req_cons <= XENSTORE_RING_SIZE);
        DEBUG("Back from wait.\n");
        prod = xenstore_buf->req_prod;
    }

    /* We're now guaranteed to be able to send the message without
       overflowing the ring. Do so. */
    total_off = 0;
    req_off = 0;
    while (total_off < len) {
        this_chunk = min(cur_req->len - req_off,
                         XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
        memcpy((char *)xenstore_buf->req + MASK_XENSTORE_IDX(prod),
               (char *)cur_req->data + req_off, this_chunk);
        prod += this_chunk;
        req_off += this_chunk;
        total_off += this_chunk;
        if (req_off == cur_req->len) {
            req_off = 0;
            if (cur_req == &header_req)
                cur_req = req;
            else
                cur_req++;
        }
    }

    DEBUG("Complete main loop of xb_write.\n");
    BUG_ON(req_off != 0);
    BUG_ON(total_off != len);
    BUG_ON(prod > xenstore_buf->req_cons + XENSTORE_RING_SIZE);

    /* Remote must see entire message before updating indexes */
    wmb();

    xenstore_buf->req_prod += len;

    /* Send evtchn to notify remote */
    notify_remote_via_evtchn(start_info.store_evtchn);
}
Developer ID: CPFL, Project: gxen, Lines of code: 77
Example 28: dsi_event_thread

static int dsi_event_thread(void *data)
{
    struct mdss_dsi_event *ev;
    struct dsi_event_q *evq;
    struct mdss_dsi_ctrl_pdata *ctrl;
    unsigned long flag;
    struct sched_param param;
    u32 todo = 0;
    int ret;

    param.sched_priority = 16;
    ret = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
    if (ret)
        pr_err("%s: set priority failed\n", __func__);

    ev = (struct mdss_dsi_event *)data;

    /* event */
    init_waitqueue_head(&ev->event_q);
    spin_lock_init(&ev->event_lock);

    while (1) {
        wait_event(ev->event_q, (ev->event_pndx != ev->event_gndx));
        spin_lock_irqsave(&ev->event_lock, flag);
        evq = &ev->todo_list[ev->event_gndx++];
        todo = evq->todo;
        ctrl = evq->ctrl;
        evq->todo = 0;
        ev->event_gndx %= DSI_EVENT_Q_MAX;
        spin_unlock_irqrestore(&ev->event_lock, flag);

        pr_debug("%s: ev=%x\n", __func__, todo);

        if (todo & DSI_EV_PLL_UNLOCKED)
            mdss_dsi_pll_relock(ctrl);

        if (todo & DSI_EV_MDP_FIFO_UNDERFLOW) {
            mutex_lock(&ctrl->mutex);
            if (ctrl->recovery) {
                mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
                mdss_dsi_sw_reset_restore(ctrl);
                ctrl->recovery->fxn(ctrl->recovery->data);
                mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
            }
            mutex_unlock(&ctrl->mutex);
        }

        if (todo & DSI_EV_DSI_FIFO_EMPTY)
            mdss_dsi_sw_reset_restore(ctrl);

        if (todo & DSI_EV_MDP_BUSY_RELEASE) {
            spin_lock_irqsave(&ctrl->mdp_lock, flag);
            ctrl->mdp_busy = false;
            mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
            complete(&ctrl->mdp_comp);
            spin_unlock_irqrestore(&ctrl->mdp_lock, flag);

            /* enable dsi error interrupt */
            mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
            mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
            mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
        }
    }

    return 0;
}
Developer ID: BlackSoulxxx, Project: XerXes, Lines of code: 66
Example 29: data_ack_wait_event

static void data_ack_wait_event(struct cbp_wait_event *pdata_ack)
{
    struct cbp_wait_event *cbp_data_ack = (struct cbp_wait_event *)pdata_ack;

    wait_event(cbp_data_ack->wait_q,
               (MODEM_ST_READY == atomic_read(&cbp_data_ack->state)) ||
               (cbp_power_state == 0));
}
Developer ID: Scorpio92, Project: mediatek, Lines of code: 6
Note: the wait_event examples in this article were compiled from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and redistribution and use should follow the License of the corresponding project. Please do not reproduce without permission.