
Tutorial: C++ wake_up_all function code examples

51自学网 | C++ | 2021-06-03 09:51:39

This article collects typical usage examples of the wake_up_all function in C/C++ code (in practice, Linux kernel C). wake_up_all is the kernel primitive that wakes every task sleeping on a wait queue. If you are unsure what wake_up_all does, how to call it, or what real-world call sites look like, the curated examples below should help.

A total of 27 wake_up_all code examples are shown below, sorted by popularity. Each example is reproduced from an open-source project; where the source page elided part of a function, the omission is marked in the code.
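Before diving into the examples, here is the pattern nearly all of them share: one side sleeps on a wait_queue_head_t until some condition becomes true, while the other side makes the condition true and then calls wake_up_all() so that every sleeper re-checks it. The following is a minimal, self-contained sketch written for this article rather than taken from any of the projects cited below; the names demo_wq, demo_ready, demo_wait_for_ready, and demo_set_ready are invented for the demonstration.

#include <linux/wait.h>
#include <linux/sched.h>

/* A wait queue and the condition its sleepers wait on (assumes the
 * caller provides whatever locking the real condition needs). */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

/* Sleeper side: block until demo_ready becomes nonzero.
 * Returns 0 once the condition is true, -ERESTARTSYS on a signal. */
static int demo_wait_for_ready(void)
{
    return wait_event_interruptible(demo_wq, demo_ready != 0);
}

/* Waker side: publish the condition first, then wake *all* sleepers.
 * wake_up_all() is chosen over wake_up() when every waiter may make
 * progress (e.g. many blocked readers), not just one exclusive waiter. */
static void demo_set_ready(void)
{
    demo_ready = 1;
    wake_up_all(&demo_wq);
}

Two points the examples below rely on: wake_up_all() wakes every task on the queue, exclusive waiters included, whereas wake_up() wakes at most one exclusive waiter; and the condition must be updated before the wakeup, or a waiter can re-check it, still see it false, and go back to sleep.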

Example 1: rpcrdma_conn_upcall

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
    struct rpcrdma_xprt *xprt = id->context;
    struct rpcrdma_ia *ia = &xprt->rx_ia;
    struct rpcrdma_ep *ep = &xprt->rx_ep;
#ifdef RPC_DEBUG
    struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
#endif
    struct ib_qp_attr attr;
    struct ib_qp_init_attr iattr;
    int connstate = 0;

    switch (event->event) {
    case RDMA_CM_EVENT_ADDR_RESOLVED:
    case RDMA_CM_EVENT_ROUTE_RESOLVED:
        ia->ri_async_rc = 0;
        complete(&ia->ri_done);
        break;
    case RDMA_CM_EVENT_ADDR_ERROR:
        ia->ri_async_rc = -EHOSTUNREACH;
        dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
            __func__, ep);
        complete(&ia->ri_done);
        break;
    case RDMA_CM_EVENT_ROUTE_ERROR:
        ia->ri_async_rc = -ENETUNREACH;
        dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
            __func__, ep);
        complete(&ia->ri_done);
        break;
    case RDMA_CM_EVENT_ESTABLISHED:
        connstate = 1;
        ib_query_qp(ia->ri_id->qp, &attr,
            IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
            &iattr);
        dprintk("RPC:       %s: %d responder resources"
            " (%d initiator)\n",
            __func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
        goto connected;
    case RDMA_CM_EVENT_CONNECT_ERROR:
        connstate = -ENOTCONN;
        goto connected;
    case RDMA_CM_EVENT_UNREACHABLE:
        connstate = -ENETDOWN;
        goto connected;
    case RDMA_CM_EVENT_REJECTED:
        connstate = -ECONNREFUSED;
        goto connected;
    case RDMA_CM_EVENT_DISCONNECTED:
        connstate = -ECONNABORTED;
        goto connected;
    case RDMA_CM_EVENT_DEVICE_REMOVAL:
        connstate = -ENODEV;
connected:
        dprintk("RPC:       %s: %s: %pI4:%u (ep 0x%p event 0x%x)\n",
            __func__,
            (event->event <= 11) ? conn[event->event] :
                        "unknown connection error",
            &addr->sin_addr.s_addr,
            ntohs(addr->sin_port),
            ep, event->event);
        atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
        dprintk("RPC:       %s: %sconnected\n",
                    __func__, connstate > 0 ? "" : "dis");
        ep->rep_connected = connstate;
        ep->rep_func(ep);
        wake_up_all(&ep->rep_connect_wait);
        break;
    default:
        dprintk("RPC:       %s: unexpected CM event %d\n",
            __func__, event->event);
        break;
    }

#ifdef RPC_DEBUG
    if (connstate == 1) {
        int ird = attr.max_dest_rd_atomic;
        int tird = ep->rep_remote_cma.responder_resources;
        printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
            "on %s, memreg %d slots %d ird %d%s\n",
            &addr->sin_addr.s_addr,
            ntohs(addr->sin_port),
            ia->ri_id->device->name,
            ia->ri_memreg_strategy,
            xprt->rx_buf.rb_max_requests,
            ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
    } else if (connstate < 0) {
        printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
            &addr->sin_addr.s_addr,
            ntohs(addr->sin_port),
            connstate);
    }
#endif

    return 0;
}
Developer: Addision; Project: LVS; Lines: 97


Example 2: findpid

The source page shows the highlighted wake_up_all() call site first (it comes from the lock-release path of osprd_ioctl), followed by the truncated function:

    osp_spin_lock(&d->mutex);
    char wake = 't';
    if (d->readlockPids->num > 1)
    {
        findpid(d->readlockPids, current->pid, 'r');
        wake = 'f';
    }
    else if (d->readlockPids->num == 1)
    {
        findpid(d->readlockPids, current->pid, 'r');
        filp->f_flags &= ~F_OSPRD_LOCKED;
    }
    else // must be a writer.....
    {
        d->nwriters = 0;
        filp->f_flags &= ~F_OSPRD_LOCKED;
    }
    osp_spin_unlock(&d->mutex);
    if (wake == 't')
        wake_up_all(d->blockq);

/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
        unsigned int cmd, unsigned long arg)
{
    osprd_info_t *d = file2osprd(filp);    // device info
    int r = 0;            // return value: initially 0
    unsigned int my_ticket;

    // is file open for writing?
    int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

    // This line avoids compiler warnings; you may remove it.
    (void) filp_writable, (void) d;

    // Set 'r' to the ioctl's return value: 0 on success, negative on error
    if (cmd == OSPRDIOCACQUIRE) {
        // EXERCISE: Lock the ramdisk.
        //
        // If *filp is open for writing (filp_writable), then attempt
        // to write-lock the ramdisk; otherwise attempt to read-lock
        // the ramdisk.
        //
        // This lock request must block using 'd->blockq' until:
        // 1) no other process holds a write lock;
        // 2) either the request is for a read lock, or no other process
        //    holds a read lock; and
        // 3) lock requests should be serviced in order, so no process
        //    that blocked earlier is still blocked waiting for the
        //    lock.
        //
        // If a process acquires a lock, mark this fact by setting
        // 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
        // keep track of how many read and write locks are held:
        // change the 'osprd_info_t' structure to do this.
        //
        // Also wake up processes waiting on 'd->blockq' as needed.
        //
        // If the lock request would cause a deadlock, return -EDEADLK.
        // If the lock request blocks and is awoken by a signal, then
        // return -ERESTARTSYS.
        // Otherwise, if we can grant the lock request, return 0.

        // 'd->ticket_head' and 'd->ticket_tail' should help you
        // service lock requests in order.  These implement a ticket
        // order: 'ticket_tail' is the next ticket, and 'ticket_head'
        // is the ticket currently being served.  You should set a local
        // variable to 'd->ticket_head' and increment 'd->ticket_head'.
        // Then, block at least until 'd->ticket_tail == local_ticket'.
        // (Some of these operations are in a critical section and must
        // be protected by a spinlock; which ones?)

        // Your code here (instead of the next two lines).
        if (filp_writable)
        {
            if ((d->nwriters == 1 && d->writelockPid == current->pid) ||
                findpid(d->readlockPids, current->pid, 'f'))
                return -EDEADLK;
            osp_spin_lock(&d->mutex);
            my_ticket = d->ticket_head;
            d->ticket_head++;
            osp_spin_unlock(&d->mutex);
            int stat = wait_event_interruptible(d->blockq,
                d->ticket_tail == my_ticket && d->nwriters == 0 &&
                d->readlockPids->num == 0); // or filp->f_flags & F_OSPRD_LOCKED == 0
            if (stat == -ERESTARTSYS)
            {
                if (my_ticket == d->ticket_tail)
                {
                    // lock mutex
                    // increment the ticket tail to the first alive
                    // unlock mutex
                    osp_spin_lock(&d->mutex);
                    while (findticket(d->exitlist, d->ticket_tail, 'f') &&
                           d->ticket_tail < d->ticket_head) // increment to first alive process
                        d->ticket_tail++;
                    osp_spin_unlock(&d->mutex);
                }
                else
                {
                    osp_spin_lock(&d->mutex);
                    pushticket(d->exitlist, my_ticket); // what if multiple processes get killed at the same time
// ......... part of the code omitted .........
Developer: tsleeve; Project: lab2; Lines: 101


Example 3: sci_remote_node_context_suspend

enum sci_status sci_remote_node_context_suspend(
            struct sci_remote_node_context *sci_rnc,
            enum sci_remote_node_suspension_reasons suspend_reason,
            u32 suspend_type)
{
    enum scis_sds_remote_node_context_states state
        = sci_rnc->sm.current_state_id;
    struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
    enum sci_status status = SCI_FAILURE_INVALID_STATE;
    enum sci_remote_node_context_destination_state dest_param =
        RNC_DEST_UNSPECIFIED;

    dev_dbg(scirdev_to_dev(idev),
        "%s: current state %s, current suspend_type %x dest state %d,"
            " arg suspend_reason %d, arg suspend_type %x",
        __func__, rnc_state_name(state), sci_rnc->suspend_type,
        sci_rnc->destination_state, suspend_reason,
        suspend_type);

    /* Disable automatic state continuations if explicitly suspending. */
    if ((suspend_reason == SCI_HW_SUSPEND) ||
        (sci_rnc->destination_state == RNC_DEST_FINAL))
        dest_param = sci_rnc->destination_state;

    switch (state) {
    case SCI_RNC_READY:
        break;
    case SCI_RNC_INVALIDATING:
        if (sci_rnc->destination_state == RNC_DEST_FINAL) {
            dev_warn(scirdev_to_dev(idev),
                 "%s: already destroying %p\n",
                 __func__, sci_rnc);
            return SCI_FAILURE_INVALID_STATE;
        }
        /* Fall through and handle like SCI_RNC_POSTING */
    case SCI_RNC_RESUMING:
        /* Fall through and handle like SCI_RNC_POSTING */
    case SCI_RNC_POSTING:
        /* Set the destination state to AWAIT - this signals the
         * entry into the SCI_RNC_READY state that a suspension
         * needs to be done immediately.
         */
        if (sci_rnc->destination_state != RNC_DEST_FINAL)
            sci_rnc->destination_state = RNC_DEST_SUSPENDED;
        sci_rnc->suspend_type = suspend_type;
        sci_rnc->suspend_reason = suspend_reason;
        return SCI_SUCCESS;
    case SCI_RNC_TX_SUSPENDED:
        if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
            status = SCI_SUCCESS;
        break;
    case SCI_RNC_TX_RX_SUSPENDED:
        if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
            status = SCI_SUCCESS;
        break;
    case SCI_RNC_AWAIT_SUSPENSION:
        if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
            || (suspend_type == sci_rnc->suspend_type))
            return SCI_SUCCESS;
        break;
    default:
        dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
             "%s: invalid state %s\n", __func__,
             rnc_state_name(state));
        return SCI_FAILURE_INVALID_STATE;
    }
    sci_rnc->destination_state = dest_param;
    sci_rnc->suspend_type = suspend_type;
    sci_rnc->suspend_reason = suspend_reason;

    if (status == SCI_SUCCESS) { /* Already in the destination state? */
        struct isci_host *ihost = idev->owning_port->owning_controller;

        wake_up_all(&ihost->eventq); /* Let observers look. */
        return SCI_SUCCESS;
    }
    if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
        (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

        if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
            isci_dev_set_hang_detection_timeout(idev, 0x00000001);

        sci_remote_device_post_request(
            idev, SCI_SOFTWARE_SUSPEND_CMD);
    }
    if (state != SCI_RNC_AWAIT_SUSPENSION)
        sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

    return SCI_SUCCESS;
}
Developer: Announcement; Project: linux; Lines: 91


Example 4: rpcrdma_conn_upcall

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
    struct rpcrdma_xprt *xprt = id->context;
    struct rpcrdma_ia *ia = &xprt->rx_ia;
    struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
    struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
    struct ib_qp_attr *attr = &ia->ri_qp_attr;
    struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
    int connstate = 0;

    switch (event->event) {
    case RDMA_CM_EVENT_ADDR_RESOLVED:
    case RDMA_CM_EVENT_ROUTE_RESOLVED:
        ia->ri_async_rc = 0;
        complete(&ia->ri_done);
        break;
    case RDMA_CM_EVENT_ADDR_ERROR:
        ia->ri_async_rc = -EHOSTUNREACH;
        dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
            __func__, ep);
        complete(&ia->ri_done);
        break;
    case RDMA_CM_EVENT_ROUTE_ERROR:
        ia->ri_async_rc = -ENETUNREACH;
        dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
            __func__, ep);
        complete(&ia->ri_done);
        break;
    case RDMA_CM_EVENT_ESTABLISHED:
        connstate = 1;
        ib_query_qp(ia->ri_id->qp, attr,
                IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
                iattr);
        dprintk("RPC:       %s: %d responder resources"
            " (%d initiator)\n",
            __func__, attr->max_dest_rd_atomic,
            attr->max_rd_atomic);
        rpcrdma_update_connect_private(xprt, &event->param.conn);
        goto connected;
    case RDMA_CM_EVENT_CONNECT_ERROR:
        connstate = -ENOTCONN;
        goto connected;
    case RDMA_CM_EVENT_UNREACHABLE:
        connstate = -ENETDOWN;
        goto connected;
    case RDMA_CM_EVENT_REJECTED:
        connstate = -ECONNREFUSED;
        goto connected;
    case RDMA_CM_EVENT_DISCONNECTED:
        connstate = -ECONNABORTED;
        goto connected;
    case RDMA_CM_EVENT_DEVICE_REMOVAL:
        connstate = -ENODEV;
connected:
        dprintk("RPC:       %s: %sconnected\n",
                    __func__, connstate > 0 ? "" : "dis");
        atomic_set(&xprt->rx_buf.rb_credits, 1);
        ep->rep_connected = connstate;
        rpcrdma_conn_func(ep);
        wake_up_all(&ep->rep_connect_wait);
        /*FALLTHROUGH*/
    default:
        dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
            __func__, sap, rpc_get_port(sap), ep,
            rdma_event_msg(event->event));
        break;
    }

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
    if (connstate == 1) {
        int ird = attr->max_dest_rd_atomic;
        int tird = ep->rep_remote_cma.responder_resources;

        pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
            sap, rpc_get_port(sap),
            ia->ri_device->name,
            ia->ri_ops->ro_displayname,
            xprt->rx_buf.rb_max_requests,
            ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
    } else if (connstate < 0) {
        pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
            sap, rpc_get_port(sap), connstate);
    }
#endif

    return 0;
}
Developer: AshishNamdev; Project: linux; Lines: 90


Example 5: give_own_timer_callback

static void give_own_timer_callback(unsigned long data)
{
    lte_dev.sdio_thread_kick_own_timer = 1;
    wake_up_all(&lte_dev.sdio_thread_wq);
}
Developer: Scorpio92; Project: mediatek; Lines: 5


Example 6: osprd_ioctl

/*
 * osprd_ioctl(inode, filp, cmd, arg)
 *   Called to perform an ioctl on the named file.
 */
int osprd_ioctl(struct inode *inode, struct file *filp,
        unsigned int cmd, unsigned long arg)
{
    osprd_info_t *d = file2osprd(filp);    // device info
    int r = 0;            // return value: initially 0
    DEFINE_WAIT(wait); // using the low level stuff

    // is file open for writing?
    int filp_writable = (filp->f_mode & FMODE_WRITE) != 0;

    // This line avoids compiler warnings; you may remove it.
    (void) filp_writable, (void) d;

    // Set 'r' to the ioctl's return value: 0 on success, negative on error
    if (cmd == OSPRDIOCACQUIRE) {
        // EXERCISE: Lock the ramdisk.
        //
        // If *filp is open for writing (filp_writable), then attempt
        // to write-lock the ramdisk; otherwise attempt to read-lock
        // the ramdisk.
        //
        // This lock request must block using 'd->blockq' until:
        // 1) no other process holds a write lock;
        // 2) either the request is for a read lock, or no other process
        //    holds a read lock; and
        // 3) lock requests should be serviced in order, so no process
        //    that blocked earlier is still blocked waiting for the
        //    lock.
        //
        // If a process acquires a lock, mark this fact by setting
        // 'filp->f_flags |= F_OSPRD_LOCKED'.  You also need to
        // keep track of how many read and write locks are held:
        // change the 'osprd_info_t' structure to do this.
        //
        // Also wake up processes waiting on 'd->blockq' as needed.
        //
        // If the lock request would cause a deadlock, return -EDEADLK.
        // If the lock request blocks and is awoken by a signal, then
        // return -ERESTARTSYS.
        // Otherwise, if we can grant the lock request, return 0.

        // 'd->ticket_head' and 'd->ticket_tail' should help you
        // service lock requests in order.  These implement a ticket
        // order: 'ticket_tail' is the next ticket, and 'ticket_head'
        // is the ticket currently being served.  You should set a local
        // variable to 'd->ticket_head' and increment 'd->ticket_head'.
        // Then, block at least until 'd->ticket_tail == local_ticket'.
        // (Some of these operations are in a critical section and must
        // be protected by a spinlock; which ones?)

        // Your code here (instead of the next two lines).
        if (filp_writable) // means we want the write lock.
        {
            osp_spin_lock(&d->mutex);
            if (d->q_size > 0) // if another proc is waiting, give control to "front of line"
            {
                if (!d->write_lock && !d->read_locks) // no locks except us
                    wake_up_all(&d->blockq);
                d->q_size++; // add to back of queue
                prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
                // add to write queue
                osp_spin_unlock(&d->mutex);
                schedule(); // go to sleep until wake_up_all wakes us
                // wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait); // delete from queue
                d->q_size--;
                // check that wasn't interrupted
                if (signal_pending(current))
                {
                    osp_spin_unlock(&d->mutex);
                    return -ERESTARTSYS;
                }
            }
            // at "front of line." Now check that no readers / writers
            while (d->write_lock || d->read_locks)
            {
                // if the lock is held just go back to back of line.
                prepare_to_wait_exclusive(&d->blockq, &wait, TASK_INTERRUPTIBLE);
                d->q_size++;
                osp_spin_unlock(&d->mutex);
                schedule();
                // wake up
                osp_spin_lock(&d->mutex);
                finish_wait(&d->blockq, &wait);
                d->q_size--;
                if (signal_pending(current))
                {
                    osp_spin_unlock(&d->mutex);
// ......... part of the code omitted .........
Developer: zrnorth; Project: linux-kernel-module; Lines: 101


Example 7: squashfs_cache_get

/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
        struct squashfs_cache *cache, u64 block, int length)
{
    int i, n;
    struct squashfs_cache_entry *entry;

    spin_lock(&cache->lock);

    while (1) {
        for (i = 0; i < cache->entries; i++)
            if (cache->entry[i].block == block)
                break;

        if (i == cache->entries) {
            /*
             * Block not in cache, if all cache entries are used
             * go to sleep waiting for one to become available.
             */
            if (cache->unused == 0) {
                cache->num_waiters++;
                spin_unlock(&cache->lock);
                wait_event(cache->wait_queue, cache->unused);
                spin_lock(&cache->lock);
                cache->num_waiters--;
                continue;
            }

            /*
             * At least one unused cache entry.  A simple
             * round-robin strategy is used to choose the entry to
             * be evicted from the cache.
             */
            i = cache->next_blk;
            for (n = 0; n < cache->entries; n++) {
                if (cache->entry[i].refcount == 0)
                    break;
                i = (i + 1) % cache->entries;
            }

            cache->next_blk = (i + 1) % cache->entries;
            entry = &cache->entry[i];

            /*
             * Initialise chosen cache entry, and fill it in from
             * disk.
             */
            cache->unused--;
            entry->block = block;
            entry->refcount = 1;
            entry->pending = 1;
            entry->num_waiters = 0;
            entry->error = 0;
            spin_unlock(&cache->lock);

            entry->length = squashfs_read_data(sb, entry->data,
                                               block, length, &entry->next_index,
                                               cache->block_size, cache->pages);

            spin_lock(&cache->lock);

            if (entry->length < 0)
                entry->error = entry->length;

            entry->pending = 0;

            /*
             * While filling this entry one or more other processes
             * have looked it up in the cache, and have slept
             * waiting for it to become available.
             */
            if (entry->num_waiters) {
                spin_unlock(&cache->lock);
                wake_up_all(&entry->wait_queue);
            } else
                spin_unlock(&cache->lock);

            goto out;
        }

        /*
         * Block already in cache.  Increment refcount so it doesn't
         * get reused until we're finished with it, if it was
         * previously unused there's one less cache entry available
         * for reuse.
         */
        entry = &cache->entry[i];
        if (entry->refcount == 0)
            cache->unused--;
        entry->refcount++;

        /*
         * If the entry is currently being filled in by another process
         * go to sleep waiting for it to become available.
         */
        if (entry->pending) {
            entry->num_waiters++;
// ......... part of the code omitted .........
Developer: rrowicki; Project: Chrono_Kernel-1; Lines: 101


Example 8: ttm_bo_synccpu_write_release

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
    if (atomic_dec_and_test(&bo->cpu_writers))
        wake_up_all(&bo->event_queue);
}
Developer: RyanMallon; Project: linux-ep93xx; Lines: 5


Example 9: ttm_bo_swapout

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
    struct ttm_bo_global *glob =
        container_of(shrink, struct ttm_bo_global, shrink);
    struct ttm_buffer_object *bo;
    int ret = -EBUSY;
    int put_count;
    uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

    spin_lock(&glob->lru_lock);
    while (ret == -EBUSY) {
        if (unlikely(list_empty(&glob->swap_lru))) {
            spin_unlock(&glob->lru_lock);
            return -EBUSY;
        }

        bo = list_first_entry(&glob->swap_lru,
                      struct ttm_buffer_object, swap);
        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
            spin_unlock(&glob->lru_lock);
            (void) ttm_bo_cleanup_refs(bo, false, false, false);
            kref_put(&bo->list_kref, ttm_bo_release_list);
            spin_lock(&glob->lru_lock);
            continue;
        }

        /**
         * Reserve buffer. Since we unlock while sleeping, we need
         * to re-check that nobody removed us from the swap-list while
         * we slept.
         */
        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
        if (unlikely(ret == -EBUSY)) {
            spin_unlock(&glob->lru_lock);
            ttm_bo_wait_unreserved(bo, false);
            kref_put(&bo->list_kref, ttm_bo_release_list);
            spin_lock(&glob->lru_lock);
        }
    }

    BUG_ON(ret != 0);
    put_count = ttm_bo_del_from_lru(bo);
    spin_unlock(&glob->lru_lock);

    ttm_bo_list_ref_sub(bo, put_count, true);

    /**
     * Wait for GPU, then move to system cached.
     */
    spin_lock(&bo->bdev->fence_lock);
    ret = ttm_bo_wait(bo, false, false, false);
    spin_unlock(&bo->bdev->fence_lock);

    if (unlikely(ret != 0))
        goto out;

    if ((bo->mem.placement & swap_placement) != swap_placement) {
        struct ttm_mem_reg evict_mem;

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        evict_mem.mem_type = TTM_PL_SYSTEM;

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                         false, false, false);
        if (unlikely(ret != 0))
            goto out;
    }

    ttm_bo_unmap_virtual(bo);

    /**
     * Swap out. Buffer will be swapped in again as soon as
     * anyone tries to access a ttm page.
     */
    if (bo->bdev->driver->swap_notify)
        bo->bdev->driver->swap_notify(bo);

    ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

    /**
     *
     * Unreserve without putting on LRU to avoid swapping out an
     * already swapped buffer.
     */

    atomic_set(&bo->reserved, 0);
    wake_up_all(&bo->event_queue);
    kref_put(&bo->list_kref, ttm_bo_release_list);
    return ret;
}
Developer: RyanMallon; Project: linux-ep93xx; Lines: 98


Example 10: osprd_ioctl

// ......... part of the code omitted .........
        }

        osp_spin_lock(&(d->mutex));
        d->dead = 0;

        if (d->mutex.lock > 0)
            r = 0;
        filp->f_flags |= F_OSPRD_LOCKED;
        if (filp_writable)
            { d->n_writel++; d->ticket_tail++; }
        else
            { d->n_readl++; }

        osp_spin_unlock(&(d->mutex));
        //wake_up_all(&d->blockq);

        if (!filp_writable)
            d->ticket_tail++;
        r = 0;

    } else if (cmd == OSPRDIOCTRYACQUIRE) {
        // EXERCISE: ATTEMPT to lock the ramdisk.
        //
        // This is just like OSPRDIOCACQUIRE, except it should never
        // block.  If OSPRDIOCACQUIRE would block or return deadlock,
        // OSPRDIOCTRYACQUIRE should return -EBUSY.
        // Otherwise, if we can grant the lock request, return 0.

        // Your code here (instead of the next two lines).
        //eprintk("Attempting to try acquire\n");
        local_ticket = d->ticket_head;
        // Check for an existing lock.
        if (filp->f_flags & F_OSPRD_LOCKED || d->n_writel != 0
            || (filp_writable && d->n_readl != 0)
            || d->ticket_tail != local_ticket)
        { r = -EBUSY; } //eprintk("Stopped\n");}

        // If *filp is open for writing (filp_writable), then attempt
        // to write-lock the ramdisk; otherwise attempt to read-lock
        // the ramdisk.
        else
        {
            osp_spin_lock(&(d->mutex));
            d->ticket_head++;
            filp->f_flags |= F_OSPRD_LOCKED;
            if (filp_writable)
            { d->n_writel++; }
            else
            { d->n_readl++; }
            if (d->ticket_tail < d->ticket_head)
                d->ticket_tail++;
            osp_spin_unlock(&(d->mutex));
            r = 0;
            wake_up_all(&d->blockq);
        }

        // Also wake up processes waiting on 'd->blockq' as needed.
        //
        // If the lock request would cause a deadlock, return -EDEADLK.
        // Otherwise, if we can grant the lock request, return 0.
        //r = -ENOTTY;

    } else if (cmd == OSPRDIOCRELEASE) {
        // EXERCISE: Unlock the ramdisk.
        //
        // If the file hasn't locked the ramdisk, return -EINVAL.
        if (!(filp->f_flags & F_OSPRD_LOCKED))
            { r = -EINVAL; }

        // Otherwise, clear the lock from filp->f_flags, wake up
        // the wait queue, perform any additional accounting steps
        // you need, and return 0.
        else
        {
            // Clear lock flag.
            osp_spin_lock(&(d->mutex));
            filp->f_flags &= ~F_OSPRD_LOCKED;

            d->n_writel = 0;
            d->n_readl = 0;

            osp_spin_unlock(&(d->mutex));
            // Wake queue.
            wake_up_all(&d->blockq);

            // Return.
            r = 0;
        }

    } else
        r = -ENOTTY; /* unknown command */
    return r;
}
Developer: tbramer; Project: CS-111-Lab-2; Lines: 101


Example 11: osprd_ioctl

// ......... part of the code omitted .........
        // EXERCISE: ATTEMPT to lock the ramdisk.
        //
        // This is just like OSPRDIOCACQUIRE, except it should never
        // block.  If OSPRDIOCACQUIRE would block or return deadlock,
        // OSPRDIOCTRYACQUIRE should return -EBUSY.
        // Otherwise, if we can grant the lock request, return 0.

        // Your code here (instead of the next two lines).
        if (filp_writable)
        {
            //atomically acquire write lock
            osp_spin_lock(&d->mutex);    //atomicity
            if ((d->number_read_lock > 0) || (d->number_write_lock > 0))
            {
                osp_spin_unlock(&d->mutex);
                return -EBUSY;
            }
            else    //(d->number_read_lock == 0) && (d->number_write_lock == 0)
            {
                d->write_lock_holder = current->pid;
                d->number_write_lock++;
                d->ticket_tail++;
                d->ticket_head++;
                filp->f_flags |= F_OSPRD_LOCKED;
                osp_spin_unlock(&d->mutex);
            }
        }
        else    //opened for read
        {
            //atomically acquire read lock
            osp_spin_lock(&d->mutex);
            {
                if (d->number_write_lock > 0)    //can't get read lock
                {
                    osp_spin_unlock(&d->mutex);
                    return -EBUSY;
                }
                else
                {
                    add_read_pid(current->pid, d);
                    d->number_read_lock++;
                    d->ticket_tail++;
                    d->ticket_head++;
                    filp->f_flags |= F_OSPRD_LOCKED;
                    osp_spin_unlock(&d->mutex);
                }
            }
        }

        //eprintk("Attempting to try acquire\n");
        //r = -ENOTTY;
    } else if (cmd == OSPRDIOCRELEASE) {

        // EXERCISE: Unlock the ramdisk.
        //
        // If the file hasn't locked the ramdisk, return -EINVAL.
        // Otherwise, clear the lock from filp->f_flags, wake up
        // the wait queue, perform any additional accounting steps
        // you need, and return 0.
        //osp_spin_lock(&d->mutex);
        if ((filp->f_flags & F_OSPRD_LOCKED) == 0)
        {
            //osp_spin_unlock(&d->mutex);
            return -EINVAL;
        }
        else
        {
            osp_spin_lock(&d->mutex);
            d->check_deadlock_list_head = list_remove_element(d->check_deadlock_list_head, current->pid);
            if (filp_writable)    //release the write locker
            {
                d->write_lock_holder = -1;
                d->number_write_lock--;
            }
            else    //release the read locker
            {
                d->number_read_lock--;
                d->pid_list_head = list_remove_element(d->pid_list_head, current->pid);
                /*if (list_free_all (pid_list_head) == -ENOTTY)
                    return -ENOTTY;*/
                if (d->pid_list_head == NULL)
                    return -ENOTTY;
            }
            filp->f_flags &= ~F_OSPRD_LOCKED;

            osp_spin_unlock(&d->mutex);
            wake_up_all(&d->blockq);
        }
        // Your code here (instead of the next line).
        //r = -ENOTTY;
    }
    else
        r = -ENOTTY; /* unknown command */
    return r;
}
Developer: summerxuan; Project: cs111; Lines: 101


Example 12: vmw_fallback_wait

int vmw_fallback_wait(struct vmw_private *dev_priv,
              bool lazy,
              bool fifo_idle,
              uint32_t seqno,
              bool interruptible,
              unsigned long timeout)
{
    struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

    uint32_t count = 0;
    uint32_t signal_seq;
    int ret;
    unsigned long end_jiffies = jiffies + timeout;
    bool (*wait_condition)(struct vmw_private *, uint32_t);
    DEFINE_WAIT(__wait);

    wait_condition = (fifo_idle) ? &vmw_fifo_idle :
        &vmw_seqno_passed;

    /**
     * Block command submission while waiting for idle.
     */

    if (fifo_idle)
        down_read(&fifo_state->rwsem);
    signal_seq = atomic_read(&dev_priv->marker_seq);
    ret = 0;

    for (;;) {
        prepare_to_wait(&dev_priv->fence_queue, &__wait,
                (interruptible) ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
        if (wait_condition(dev_priv, seqno))
            break;
        if (time_after_eq(jiffies, end_jiffies)) {
            DRM_ERROR("SVGA device lockup.\n");
            break;
        }
        if (lazy)
            schedule_timeout(1);
        else if ((++count & 0x0F) == 0) {
            /**
             * FIXME: Use schedule_hr_timeout here for
             * newer kernels and lower CPU utilization.
             */

            __set_current_state(TASK_RUNNING);
            schedule();
            __set_current_state((interruptible) ?
                        TASK_INTERRUPTIBLE :
                        TASK_UNINTERRUPTIBLE);
        }
        if (interruptible && signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
    }
    finish_wait(&dev_priv->fence_queue, &__wait);
    if (ret == 0 && fifo_idle) {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
    }
    wake_up_all(&dev_priv->fence_queue);
    if (fifo_idle)
        up_read(&fifo_state->rwsem);

    return ret;
}
Developer: 03199618; Project: linux; Lines: 68


Example 13: send_pcc_cmd

// ......... part of the code omitted .........
     * the channel before writing to PCC space
     */
    if (cmd == CMD_READ) {
        /*
         * If there are pending cpc_writes, then we stole the channel
         * before write completion, so first send a WRITE command to
         * platform
         */
        if (pcc_ss_data->pending_pcc_write_cmd)
            send_pcc_cmd(pcc_ss_id, CMD_WRITE);

        ret = check_pcc_chan(pcc_ss_id, false);
        if (ret)
            goto end;
    } else /* CMD_WRITE */
        pcc_ss_data->pending_pcc_write_cmd = FALSE;

    /*
     * Handle the Minimum Request Turnaround Time(MRTT)
     * "The minimum amount of time that OSPM must wait after the completion
     * of a command before issuing the next command, in microseconds"
     */
    if (pcc_ss_data->pcc_mrtt) {
        time_delta = ktime_us_delta(ktime_get(),
                        pcc_ss_data->last_cmd_cmpl_time);
        if (pcc_ss_data->pcc_mrtt > time_delta)
            udelay(pcc_ss_data->pcc_mrtt - time_delta);
    }

    /*
     * Handle the non-zero Maximum Periodic Access Rate(MPAR)
     * "The maximum number of periodic requests that the subspace channel can
     * support, reported in commands per minute. 0 indicates no limitation."
     *
     * This parameter should be ideally zero or large enough so that it can
     * handle maximum number of requests that all the cores in the system can
     * collectively generate. If it is not, we will follow the spec and just
     * not send the request to the platform after hitting the MPAR limit in
     * any 60s window
     */
    if (pcc_ss_data->pcc_mpar) {
        if (pcc_ss_data->mpar_count == 0) {
            time_delta = ktime_ms_delta(ktime_get(),
                            pcc_ss_data->last_mpar_reset);
            if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
                pr_debug("PCC cmd not sent due to MPAR limit");
                ret = -EIO;
                goto end;
            }
            pcc_ss_data->last_mpar_reset = ktime_get();
            pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
        }
        pcc_ss_data->mpar_count--;
    }

    /* Write to the shared comm region. */
    writew_relaxed(cmd, &generic_comm_base->command);

    /* Flip CMD COMPLETE bit */
    writew_relaxed(0, &generic_comm_base->status);

    pcc_ss_data->platform_owns_pcc = true;

    /* Ring doorbell */
    ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
    if (ret < 0) {
        pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
                cmd, ret);
        goto end;
    }

    /* wait for completion and check for PCC errro bit */
    ret = check_pcc_chan(pcc_ss_id, true);

    if (pcc_ss_data->pcc_mrtt)
        pcc_ss_data->last_cmd_cmpl_time = ktime_get();

    if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
        mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
    else
        mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
    if (cmd == CMD_WRITE) {
        if (unlikely(ret)) {
            for_each_possible_cpu(i) {
                struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
                if (!desc)
                    continue;

                if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
                    desc->write_cmd_status = ret;
            }
        }
        pcc_ss_data->pcc_write_cnt++;
        wake_up_all(&pcc_ss_data->pcc_write_wait_q);
    }

    return ret;
}
Developer: MaxKellermann; Project: linux; Lines: 101


Example 14: netlock_release

asmlinkage int netlock_release(void)
{
    DEFINE_WAIT(wait_queue);

    spin_lock(&lock);

    if (list_empty(&wait_queue.task_list))
    {
        if (reader_count <= 1)
        {
            reader_count = 0;
            read_lock_available = 1;
            write_lock_available = 1;
        }
        else
        {
            reader_count--;
        }
        spin_unlock(&lock);
    }
    else
    {
        wait_queue_head_t *pos;
        int exclusiveFound = 0;
        wait_queue_head_t temp;

        // Save the head of the list
        wait_queue_head_t *head = (wait_queue_head_t *) kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
        head->task_list = wait_queue.task_list;
        pos = head;

        for (pos->task_list = *(&wait_queue.task_list)->next;
             (pos->task_list.next != *(&wait_queue.task_list.next)) && (pos->task_list.prev != *(&wait_queue.task_list.prev));
             pos->task_list = *(pos->task_list.next))
        {
            if (pos->netlock_flag == 1)        //1 indicates exclusive
            {
                if (exclusiveFound == 0)
                {
                    exclusiveFound = 1;
                    temp = *pos;
                }
            }
            if (pos->netlock_flag == 0)        //0 indicates shared
            {
                reader_count++;
            }
        }

        if (exclusiveFound == 1)
        {
            write_lock_available = 0;
            remove_wait_queue(&temp, &wait_queue);
            kfree(pos);
            spin_unlock(&lock);
            wake_up(&temp);
            //prepare_to_wait(&temp, &wait_queue, TASK_INTERRUPTIBLE);
            //finish_wait(&temp, &wait_queue);
        }
        else
        {
            if (reader_count > 0)
            {
                read_lock_available = 0;
                spin_unlock(&lock);
                wake_up_all(head);
            }
            else
            {
                spin_unlock(&lock);
            }
        }
        kfree(head);
    }

    return 0;
}
Developer: reservoirman; Project: OSAndroidKernel; Lines: 82


Example 15: ttm_bo_unreserve_locked

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
    ttm_bo_add_to_lru(bo);
    atomic_set(&bo->reserved, 0);
    wake_up_all(&bo->event_queue);
}
Developer: RyanMallon; Project: linux-ep93xx; Lines: 6


Example 16: cxl_context_events_pending

void cxl_context_events_pending(struct cxl_context *ctx,
                unsigned int new_events)
{
    atomic_add(new_events, &ctx->afu_driver_events);
    wake_up_all(&ctx->wq);
}
Developer: AlexShiLucky; Project: linux; Lines: 6


Example 17: bus1_active_cleanup

/**
 * bus1_active_cleanup() - cleanup drained object
 * @active:	object to release
 * @waitq:	wait-queue linked to @active, or NULL
 * @cleanup:	cleanup callback, or NULL
 * @userdata:	userdata for callback
 *
 * This performs the final object cleanup. The caller must guarantee that the
 * object is drained, by calling bus1_active_drain().
 *
 * This function invokes the passed cleanup callback on the object. However, it
 * guarantees that this is done exactly once. If there're multiple parallel
 * callers, this will pick one randomly and make all others wait until it is
 * done. If you call this after it was already cleaned up, this is a no-op
 * and only serves as barrier.
 *
 * If @waitq is NULL, the wait is skipped and the call returns immediately. In
 * this case, another thread has entered before, but there is no guarantee that
 * they finished executing the cleanup callback, yet.
 *
 * If @waitq is non-NULL, this call behaves like a down_write(), followed by an
 * up_write(), just like bus1_active_drain(). If @waitq is NULL, this rather
 * behaves like a down_write_trylock(), optionally followed by an up_write().
 *
 * Return: True if this is the thread that released it, false otherwise.
 */
bool bus1_active_cleanup(struct bus1_active *active,
             wait_queue_head_t *waitq,
             void (*cleanup) (struct bus1_active *, void *),
             void *userdata)
{
    int v;

    if (BUS1_WARN_ON(!bus1_active_is_drained(active)))
        return false;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
    /*
     * We pretend this is a down_write_interruptible() and all but
     * the release-context get interrupted. This is required, as we
     * cannot call lock_acquired() on multiple threads without
     * synchronization. Hence, only the release-context will do
     * this, all others just release the lock.
     */
    lock_acquire_exclusive(&active->dep_map,/* lock */
                   0,		/* subclass */
                   !waitq,	/* try-lock */
                   NULL,	/* nest underneath */
                   _RET_IP_);	/* IP */
#endif

    /* mark object as RELEASE */
    v = atomic_cmpxchg(&active->count,
               BUS1_ACTIVE_RELEASE_DIRECT, BUS1_ACTIVE_RELEASE);
    if (v != BUS1_ACTIVE_RELEASE_DIRECT)
        v = atomic_cmpxchg(&active->count,
                   BUS1_ACTIVE_BIAS, BUS1_ACTIVE_RELEASE);

    /*
     * If this is the thread that marked the object as RELEASE, we
     * perform the actual release. Otherwise, we wait until the
     * release is done and the node is marked as DRAINED.
     */
    if (v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* we're the release-context and acquired the lock */
        lock_acquired(&active->dep_map, _RET_IP_);
#endif

        if (cleanup)
            cleanup(active, userdata);

        /* mark as DONE */
        atomic_set(&active->count, BUS1_ACTIVE_DONE);
        if (waitq)
            wake_up_all(waitq);
    } else if (waitq) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* we're contended against the release context */
        lock_contended(&active->dep_map, _RET_IP_);
#endif

        /* wait until object is DRAINED */
        wait_event(*waitq,
               atomic_read(&active->count) == BUS1_ACTIVE_DONE);
    }

#ifdef CONFIG_DEBUG_LOCK_ALLOC
    /*
     * No-one but the release-context acquired the lock. However,
     * that does not matter as we simply treat this as
     * 'interrupted'. Everyone releases the lock, but only one
     * caller really got it.
     */
    lock_release(&active->dep_map,	/* lock */
             1,			/* nested (no-op) */
             _RET_IP_);		/* instruction pointer */
#endif
// ......... part of the code omitted .........
Developer: eworm-de; Project: bus1; Lines: 101


Example 18: wait_callback

static void wait_callback(struct kgsl_device *device,
        struct kgsl_context *context, void *priv, int result)
{
    struct adreno_context *drawctxt = priv;

    wake_up_all(&drawctxt->waiting);
}
Developer: Skin1980; Project: bass-MM; Lines: 6


Example 19: radeon_fence_wait_seq

/**
 * radeon_fence_wait_seq - wait for a specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptable sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequnce number array is indexed by ring id.
 * @intr selects whether to use interruptable (true) or non-interruptable
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                 bool intr)
{
    uint64_t last_seq[RADEON_NUM_RINGS];
    bool signaled;
    int i, r;

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

        /* Save current sequence values, used to check for GPU lockups */
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (!target_seq[i])
                continue;

            last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
            trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
            radeon_irq_kms_sw_irq_get(rdev, i);
        }

        if (intr) {
            r = wait_event_interruptible_timeout(rdev->fence_queue, (
                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
                 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
        } else {
            r = wait_event_timeout(rdev->fence_queue, (
                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
                 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
        }

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (!target_seq[i])
                continue;

            radeon_irq_kms_sw_irq_put(rdev, i);
            trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
        }

        if (unlikely(r < 0))
            return r;

        if (unlikely(!signaled)) {
            if (rdev->needs_reset)
                return -EDEADLK;

            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r)
                continue;

            for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i])
                    continue;

                if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
                    break;
            }

            if (i != RADEON_NUM_RINGS)
                continue;

            for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i])
                    continue;

                if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
                    break;
            }

            if (i < RADEON_NUM_RINGS) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for "
                     "0x%016llx last fence id 0x%016llx on"
                     " ring %d)\n",
                     target_seq[i], last_seq[i], i);

                /* remember that we need an reset */
                rdev->needs_reset = true;
                wake_up_all(&rdev->fence_queue);
                return -EDEADLK;
            }
        }
    }
    return 0;
}
Developer: AkyZero; Project: wrapfs-latest; Lines: 99


Example 20: adreno_drawctxt_detach

/**
 * adreno_drawctxt_detach(): detach a context from the GPU
 * @context: Generic KGSL context container for the context
 *
 */
int adreno_drawctxt_detach(struct kgsl_context *context)
{
    struct kgsl_device *device;
    struct adreno_device *adreno_dev;
    struct adreno_context *drawctxt;
    struct adreno_ringbuffer *rb;
    int ret;

    if (context == NULL)
        return 0;

    device = context->device;
    adreno_dev = ADRENO_DEVICE(device);
    drawctxt = ADRENO_CONTEXT(context);
    rb = drawctxt->rb;

    /* deactivate context */
    if (rb->drawctxt_active == drawctxt)
        adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);

    mutex_lock(&drawctxt->mutex);

    while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
        struct kgsl_cmdbatch *cmdbatch =
            drawctxt->cmdqueue[drawctxt->cmdqueue_head];

        drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
            ADRENO_CONTEXT_CMDQUEUE_SIZE;

        mutex_unlock(&drawctxt->mutex);

        /*
         * If the context is deteached while we are waiting for
         * the next command in GFT SKIP CMD, print the context
         * detached status here.
         */
        adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch);

        /*
         * Don't hold the drawctxt mutex while the cmdbatch is being
         * destroyed because the cmdbatch destroy takes the device
         * mutex and the world falls in on itself
         */

        kgsl_cmdbatch_destroy(cmdbatch);
        mutex_lock(&drawctxt->mutex);
    }

    mutex_unlock(&drawctxt->mutex);

    /*
     * internal_timestamp is set in adreno_ringbuffer_addcmds,
     * which holds the device mutex. The entire context destroy
     * process requires the device mutex as well. But lets
     * make sure we notice if the locking changes.
     */
    BUG_ON(!mutex_is_locked(&device->mutex));

    /* Wait for the last global timestamp to pass before continuing */
    ret = adreno_drawctxt_wait_global(adreno_dev, context,
        drawctxt->internal_timestamp, 10 * 1000);

    /*
     * If the wait for global fails then nothing after this point is likely
     * to work very well - BUG_ON() so we can take advantage of the debug
     * tools to figure out what the h - e - double hockey sticks happened
     */

    BUG_ON(ret);

    kgsl_sharedmem_writel(device, &device->memstore,
            KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
            drawctxt->timestamp);

    kgsl_sharedmem_writel(device, &device->memstore,
            KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
            drawctxt->timestamp);

    adreno_profile_process_results(adreno_dev);

    /* wake threads waiting to submit commands from this context */
    wake_up_all(&drawctxt->waiting);
    wake_up_all(&drawctxt->wq);

    return ret;
}
Developer: Skin1980; Project: bass-MM; Lines: 90


Example 21: submitjob

asmlinkage long submitjob(void *args, int argslen)
{
    struct job *job = NULL;
    struct job *u_args = (struct job *) args;
    int err = 0;
    struct queue *q;

    if (args == NULL) {
        printk("SUBMITJOB: Invalid arguments\n");
        err = -EINVAL;
        goto out;
    }

    /* memory allocation for user arguments into kernel space */
    job = kmalloc(sizeof(struct job), GFP_KERNEL);
    if (job == NULL) {
        printk("SUBMITJOB: Insufficient memory\n");
        err = -ENOMEM;
        goto out;
    }

    /* Copying and validation of user space arguments */
    err = copy_from_user(job, u_args, sizeof(struct job));
    if (err != 0) {
        printk("SUBMITJOB: copy_from_user failed\n");
        err = -EFAULT;
        goto out;
    }

    if (job->job_type == ENCRYPT || job->job_type == DECRYPT) {

        job->key = kmalloc(MD5_KEY_LENGTH, GFP_KERNEL);
        if (job->key == NULL) {
            printk("SUBMITJOB: Insufficient memory\n");
            err = -ENOMEM;
            goto out;
        }

        /* Copying and validation of user space arguments */
        err = copy_from_user(u_args->key, u_args->key, MD5_KEY_LENGTH);
        if (err != 0) {
            printk("SUBMITJOB: copy_from_user failed\n");
            err = -EFAULT;
            goto out;
        }
    }
    else if (job->job_type == REMOVE) {
        delete_job_id(prod_cons_q, argslen);
        kfree(job);
        goto out;
    }

    printk("job_type %d\n", job->job_type);

    job_id++;
    job->job_id = job_id;

top:
    mutex_lock(&big_mutex);

    /* adding job to the queue */
    if (prod_cons_q_len < MAX_LEN) {
        q = add_job(prod_cons_q, job);
        if (IS_ERR(q)) {
            err = PTR_ERR(q);
            goto out;
        }
        else
            prod_cons_q_len++;
    }
    else if (prod_cons_q_len == MAX_LEN) {

        printk("[sys_submitjob]: Producer going to sleep\n");
        mutex_unlock(&big_mutex);
        wait_event_interruptible(producers, prod_cons_q_len < MAX_LEN);
        goto top;
    }

    print_queue(prod_cons_q);

    mutex_unlock(&big_mutex);

    wake_up_all(&consumers);

out:
    return err;
}
Developer: priyamsirohi; Project: Asynchronous-processing; Lines: 97


Example 22: osprd_close_last

// This function is called when a /dev/osprdX file is finally closed.
// (If the file descriptor was dup2ed, this function is called only when the
// last copy is closed.)
static int osprd_close_last(struct inode *inode, struct file *filp)
{
    if (filp) {
        osprd_info_t *d = file2osprd(filp);
        int filp_writable = filp->f_mode & FMODE_WRITE;

        // EXERCISE: If the user closes a ramdisk file that holds
        // a lock, release the lock.  Also wake up blocked processes
        // as appropriate.

        // Your code here.
        // need to be tested
        osp_spin_lock(&d->mutex);
        if (filp->f_flags & F_OSPRD_LOCKED)
        {
            if (filp_writable)
            {
                d->writelock = 0;
                d->write_lock_pid = -1;
            }
            else
            {
                d->readlock--;
                pid_list_t prev = d->read_lock_pids;
                pid_list_t curr = d->read_lock_pids;
                while (curr != NULL)
                {
                    if (curr->pid == current->pid)
                    {
                        if (prev == NULL)
                            d->read_lock_pids = curr->next;
                        else
                            prev->next = curr->next;
                        break;
                    }
                    else
                    {
                        prev = curr;
                        curr = curr->next;
                    }
                }
            }

            filp->f_flags &= ~F_OSPRD_LOCKED; // set to zero
            wake_up_all(&d->blockq);
            osp_spin_unlock(&d->mutex);
        }
        else
        {
            osp_spin_unlock(&d->mutex);
            return 0;
        }

        // This line avoids compiler warnings; you may remove it.
        (void) filp_writable, (void) d;
    }

    return 0;
}
Developer: OrlandoLee; Project: cs111; Lines: 68


Example 23: xfs_buf_item_unpin

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
    struct xfs_log_item	*lip,
    int			remove)
{
    struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
    xfs_buf_t	*bp = bip->bli_buf;
    struct xfs_ail	*ailp = lip->li_ailp;
    int		stale = bip->bli_flags & XFS_BLI_STALE;
    int		freed;

    ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
    ASSERT(atomic_read(&bip->bli_refcount) > 0);

    trace_xfs_buf_item_unpin(bip);

    freed = atomic_dec_and_test(&bip->bli_refcount);

    if (atomic_dec_and_test(&bp->b_pin_count))
        wake_up_all(&bp->b_waiters);

    if (freed && stale) {
        ASSERT(bip->bli_flags & XFS_BLI_STALE);
        ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
        ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
        ASSERT(XFS_BUF_ISSTALE(bp));
        ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

        trace_xfs_buf_item_unpin_stale(bip);

        if (remove) {
            /*
             * We have to remove the log item from the transaction
             * as we are about to release our reference to the
             * buffer.  If we don't, the unlock that occurs later
             * in xfs_trans_uncommit() will try to reference the
             * buffer which we no longer have a hold on.
             */
            xfs_trans_del_item(lip);

            /*
             * Since the transaction no longer refers to the buffer,
             * the buffer should no longer refer to the transaction.
             */
            XFS_BUF_SET_FSPRIVATE2(bp, NULL);
        }

        /*
         * If we get called here because of an IO error, we may
         * or may not have the item on the AIL. xfs_trans_ail_delete()
         * will take care of that situation.
         * xfs_trans_ail_delete() drops the AIL lock.
         */
        if (bip->bli_flags & XFS_BLI_STALE_INODE) {
            xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
            XFS_BUF_SET_FSPRIVATE(bp, NULL);
            XFS_BUF_CLR_IODONE_FUNC(bp);
        } else {
            spin_lock(&ailp->xa_lock);
            xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
            xfs_buf_item_relse(bp);
            ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
        }
        xfs_buf_relse(bp);
    }
Developer: 3sOx; Project: asuswrt-merlin; Lines: 78


Example 24: osprd_ioctl

// ......... part of the code omitted .........
                d->readlock++;
                // Add pid to read lock pid lists
                pid_list_t prev = NULL;
                pid_list_t curr = d->read_lock_pids;
                while (curr != NULL)
                {
                    prev = curr;
                    curr = curr->next;
                }
                if (prev == NULL)
                {
                    d->read_lock_pids = kmalloc(sizeof(pid_list_t), GFP_ATOMIC);
                    d->read_lock_pids->pid = current->pid;
                    d->read_lock_pids->next = NULL;
                }
                else
                {
                    // assign to next
                    prev->next = kmalloc(sizeof(pid_list_t), GFP_ATOMIC);
                    prev->next->pid = current->pid;
                    prev->next->next = NULL;
                }
                r = 0;
            }
            else if (d->write_lock_pid == current->pid)
            {
                osp_spin_unlock(&d->mutex);
                return -EDEADLK;
            }
            else
            {
                r = -EBUSY;
            }
            osp_spin_unlock(&d->mutex);
        }
    }
    else if (cmd == OSPRDIOCRELEASE) {

        // EXERCISE: Unlock the ramdisk.
        //
        // If the file hasn't locked the ramdisk, return -EINVAL.
        // Otherwise, clear the lock from filp->f_flags, wake up
        // the wait queue, perform any additional accounting steps
        // you need, and return 0.

        // Your code here (instead of the next line).
        r = -ENOTTY;

        if (filp->f_flags & F_OSPRD_LOCKED == 0)
        {
            r = -EINVAL;
        }
        else
        {
            osp_spin_lock(&d->mutex); // should put it here: CS

            if (filp_writable)  // how to know it is read or write lock
            {
                d->writelock = 0;
                d->write_lock_pid = -1;
            }
            else
            {
                d->readlock--;
                // Clear this PID from the read lock list
                pid_list_t prev = NULL;
                pid_list_t curr = d->read_lock_pids;
                while (curr != NULL)
                {
                    if (curr->pid == current->pid)
                    {
                        if (prev == NULL)
                            d->read_lock_pids = curr->next;
                        else
                            prev->next = curr->next;
                        kfree(curr);
                        break;
                    }
                    else
                    {
                        prev = curr;
                        curr = curr->next;
                    }
                }
            }
            filp->f_flags &= ~F_OSPRD_LOCKED; // set to zero
            wake_up_all(&d->blockq);
            r = 0;

            osp_spin_unlock(&d->mutex);
        }

    } else
        r = -ENOTTY; /* unknown command */
    return r;
}
Developer: OrlandoLee; Project: cs111; Lines: 101


Example 25: radeon_fence_process

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
    uint64_t seq, last_seq, last_emitted;
    unsigned count_loop = 0;
    bool wake = false;

    /* Note there is a scenario here for an infinite loop but it's
     * very unlikely to happen. For it to happen, the current polling
     * process need to be interrupted by another process and another
     * process needs to update the last_seq btw the atomic read and
     * xchg of the current process.
     *
     * More over for this to go in infinite loop there need to be
     * continuously new fence signaled ie radeon_fence_read needs
     * to return a different value each time for both the currently
     * polling process and the other process that xchg the last_seq
     * btw atomic read and xchg of the current process. And the
     * value the other process set as last seq must be higher than
     * the seq value we just read. Which means that current process
     * need to be interrupted after radeon_fence_read and before
     * atomic xchg.
     *
     * To be even more safe we count the number of time we loop and
     * we bail after 10 loop just accepting the fact that we might
     * have temporarly set the last_seq not to the true real last
     * seq but to an older one.
     */
    last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
    do {
        last_emitted = rdev->fence_drv[ring].sync_seq[ring];
        seq = radeon_fence_read(rdev, ring);
        seq |= last_seq & 0xffffffff00000000LL;
        if (seq < last_seq) {
            seq &= 0xffffffff;
            seq |= last_emitted & 0xffffffff00000000LL;
        }

        if (seq <= last_seq || seq > last_emitted) {
            break;
        }
        /* If we loop over we don't want to return without
         * checking if a fence is signaled as it means that the
         * seq we just read is different from the previous on.
         */
        wake = true;
        last_seq = seq;
        if ((count_loop++) > 10) {
            /* We looped over too many time leave with the
             * fact that we might have set an older fence
             * seq then the current real last seq as signaled
             * by the hw.
             */
            break;
        }
    } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

    if (wake) {
        rdev->fence_drv[ring].last_activity = jiffies;
        wake_up_all(&rdev->fence_queue);
    }
}
Developer: Forzaferrarileo; Project: linux; Lines: 70


Example 26: osprd_ioctl

// ......... part of the code omitted .........
            }
            osp_spin_unlock(&(d->mutex));
            return -ERESTARTSYS;
        }
        osp_spin_lock(&(d->mutex));
        d->reader_cnt++;
        d->current_pid = current->pid;
        filp->f_flags |= F_OSPRD_LOCKED;
        osp_spin_unlock(&(d->mutex));
    }

    // Check if next ticket is still valid (the process is still alive)
    osp_spin_lock(&(d->mutex));
    d->ticket_tail++;
    int i = 0;
    while (i < d->exited_tickets_cnt) {
        if (d->exited_tickets[i] == d->ticket_tail) {
            d->ticket_tail++;
            d->exited_tickets[i] = d->exited_tickets[--d->exited_tickets_cnt];
            i = 0;
            continue;
        }
        i++;
    }
    osp_spin_unlock(&(d->mutex));
    r = 0;

    } else if (cmd == OSPRDIOCTRYACQUIRE) {
        // EXERCISE: ATTEMPT to lock the ramdisk.
        //
        // This is just like OSPRDIOCACQUIRE, except it should never
        // block.  If OSPRDIOCACQUIRE would block or return deadlock,
        // OSPRDIOCTRYACQUIRE should return -EBUSY.
        // Otherwise, if we can grant the lock request, return 0.

        // Your code here (instead of the next two lines).
        /*eprintk("Attempting to try acquire\n");
          r = -ENOTTY;*/
        osp_spin_lock(&(d->mutex));
        if (current->pid == d->current_pid) {
            osp_spin_unlock(&(d->mutex));
            return 0;
        }
        if (filp_writable) {
            if (d->reader_cnt == 0 && d->writer_cnt == 0) {
                d->writer_cnt++;
                filp->f_flags |= F_OSPRD_LOCKED;
                d->current_pid = current->pid;
            } else {
                osp_spin_unlock(&(d->mutex));
                return -EBUSY;
            }
        } else {
            if (d->writer_cnt == 0) {
                d->reader_cnt++;
                filp->f_flags |= F_OSPRD_LOCKED;
                d->current_pid = current->pid;
            } else {
                osp_spin_unlock(&(d->mutex));
                return -EBUSY;
            }
        }
        osp_spin_unlock(&(d->mutex));
        r = 0;

    } else if (cmd == OSPRDIOCRELEASE) {
        // EXERCISE: Unlock the ramdisk.
        //
        // If the file hasn't locked the ramdisk, return -EINVAL.
        // Otherwise, clear the lock from filp->f_flags, wake up
        // the wait queue, perform any additional accounting steps
        // you need, and return 0.

        // Your code here (instead of the next line).
        //r = -ENOTTY;
        osp_spin_lock(&(d->mutex));
        if (!(filp->f_flags & F_OSPRD_LOCKED)) {
            osp_spin_unlock(&(d->mutex));
            return -EINVAL;
        }
        if (filp_writable) {
            d->writer_cnt--;
            filp->f_flags &= ~F_OSPRD_LOCKED;
        }
        else {
            d->reader_cnt--;
            if (d->reader_cnt == 0)
                filp->f_flags &= ~F_OSPRD_LOCKED;
        }
        wake_up_all(&(d->blockq));
        osp_spin_unlock(&(d->mutex));
        r = 0;
    } else
        r = -ENOTTY; /* unknown command */
    return r;
}
Developer: tsengliwei; Project: cs111-lab2; Lines: 101


Example 27: __set_state

static inline void __set_state(struct hvsi_struct *hp, int state)
{
    hp->state = state;
    print_state(hp);
    wake_up_all(&hp->stateq);
}
Developer: gnensis; Project: linux-2.6.15; Lines: 6



Note: the wake_up_all examples in this article were collected from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and reuse and redistribution are governed by each project's license. Please do not republish without permission.


Related: C++ wake_up_bit function code examples | C++ wake_up function code examples