The C++ DBUG_ON code examples collected in this tutorial are quite practical; we hope they are of help to you.
This article collects and summarizes typical usage examples of the DBUG_ON function in C++. If you have been struggling with questions such as what exactly DBUG_ON does, how to call it, or what real call sites look like, the hand-picked code examples below may help. A total of 19 DBUG_ON code examples are shown, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better C++ code examples.

Example 1: xpc_teardown_ch_structures

static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
    DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
    DBUG_ON(atomic_read(&part->nchannels_active) != 0);

    /* */
    DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
    part->setup_state = XPC_P_SS_WTEARDOWN;

    wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

    /* */
    xpc_arch_ops.teardown_ch_structures(part);

    kfree(part->remote_openclose_args_base);
    part->remote_openclose_args = NULL;
    kfree(part->channels);
    part->channels = NULL;

    part->setup_state = XPC_P_SS_TORNDOWN;
}
Developer ID: romanbb, Project: android_kernel_lge_d851, Lines of code: 27
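All of these snippets come from the SGI cross-partition communication (XP/XPC) drivers in the Linux kernel, where DBUG_ON is a debug-only assertion. For orientation, a minimal sketch of how such a macro is typically defined (assuming the usual drivers/misc/sgi-xp/xp.h arrangement; not quoted from this page) looks like this:

/*
 * Sketch of a DBUG_ON-style macro: it expands to a hard BUG_ON() assertion
 * only when debug checking is compiled in, and to nothing otherwise, so the
 * checks shown in the examples below cost nothing in production builds.
 */
#ifdef USE_DBUG_ON
#define DBUG_ON(condition)    BUG_ON(condition)
#else
#define DBUG_ON(condition)
#endif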
Example 2: xpc_allocate_msg_wait

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
    enum xp_retval ret;

    if (ch->flags & XPC_C_DISCONNECTING) {
        DBUG_ON(ch->reason == xpInterrupted);
        return ch->reason;
    }

    atomic_inc(&ch->n_on_msg_allocate_wq);
    ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
    atomic_dec(&ch->n_on_msg_allocate_wq);

    if (ch->flags & XPC_C_DISCONNECTING) {
        ret = ch->reason;
        DBUG_ON(ch->reason == xpInterrupted);
    } else if (ret == 0) {
        ret = xpTimeout;
    } else {
        ret = xpInterrupted;
    }

    return ret;
}
Developer ID: johnny, Project: CobraDroidBeta, Lines of code: 29
Example 3: xpc_allocate_msg_wait

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
    enum xp_retval ret;
    DEFINE_WAIT(wait);

    if (ch->flags & XPC_C_DISCONNECTING) {
        DBUG_ON(ch->reason == xpInterrupted);
        return ch->reason;
    }

    atomic_inc(&ch->n_on_msg_allocate_wq);
    prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
    ret = schedule_timeout(1);
    finish_wait(&ch->msg_allocate_wq, &wait);
    atomic_dec(&ch->n_on_msg_allocate_wq);

    if (ch->flags & XPC_C_DISCONNECTING) {
        ret = ch->reason;
        DBUG_ON(ch->reason == xpInterrupted);
    } else if (ret == 0) {
        ret = xpTimeout;
    } else {
        ret = xpInterrupted;
    }

    return ret;
}
Developer ID: 020gzh, Project: linux, Lines of code: 32
Example 4: xpc_timeout_partition_disengage_request

/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
    struct xpc_partition *part = (struct xpc_partition *)data;

    DBUG_ON(time_before(jiffies, part->disengage_request_timeout));

    (void)xpc_partition_disengaged(part);

    DBUG_ON(part->disengage_request_timeout != 0);
    DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}
Developer ID: 274914765, Project: C, Lines of code: 15
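Example 4 is an old-style kernel timer callback: the partition pointer is passed through the unsigned long data argument. As a hedged illustration of how such a timer might be armed with the legacy (pre-timer_setup) API, the helper below is a sketch only; the disengage_request_timer field and the timelimit parameter are assumptions for illustration, not taken from this page.

/*
 * Hypothetical helper that arms the disengage-request timer using the
 * legacy timer API.  Everything except the callback name is an assumed,
 * illustrative detail.
 */
static void
xpc_arm_disengage_request_timer(struct xpc_partition *part,
                                unsigned long timelimit_in_seconds)
{
    struct timer_list *timer = &part->disengage_request_timer; /* assumed field */

    init_timer(timer);
    timer->function = xpc_timeout_partition_disengage_request;
    timer->data = (unsigned long)part;      /* becomes the callback's 'data' */
    timer->expires = jiffies + timelimit_in_seconds * HZ;
    add_timer(timer);
}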
Example 5: xpc_initiate_received

/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *   partid - ID of partition to which the channel is connected.
 *   ch_number - channel # message received on.
 *   payload - pointer to the payload area allocated via
 *             xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
    struct xpc_partition *part = &xpc_partitions[partid];
    struct xpc_channel *ch;

    DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
    DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

    ch = &part->channels[ch_number];
    xpc_received_payload(ch, payload);

    /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
    xpc_msgqueue_deref(ch);
}
Developer ID: johnny, Project: CobraDroidBeta, Lines of code: 29
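The acknowledgement described in example 5 normally happens inside the client's channel callback: once a delivered payload has been consumed, it is handed back with xpc_initiate_received() so XPC can reuse the message slot. The fragment below is a sketch under assumptions: the xpc_channel_func parameter list and the xpMsgReceived reason code are not quoted on this page, and process_payload() is a made-up consumer.

/* Hypothetical consumer of the user-defined message. */
static void process_payload(void *payload)
{
    /* ... interpret the payload ... */
}

/*
 * Hypothetical receive-side channel callback (sketch): acknowledge the
 * payload only after it has been processed.
 */
static void
my_recv_func(enum xp_retval reason, short partid, int ch_number,
             void *data, void *key)
{
    if (reason != xpMsgReceived)    /* assumed reason code for delivery */
        return;                     /* connect/disconnect notifications, etc. */

    process_payload(data);

    /* return the message slot to XPC, which is what example 5 implements */
    xpc_initiate_received(partid, ch_number, data);
}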
Example 6: xpc_setup_msg_structures_uv

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
    static enum xp_retval ret;
    struct xpc_channel_uv *ch_uv = &ch->sn.uv;

    DBUG_ON(ch->flags & XPC_C_SETUP);

    ch_uv->cached_notify_gru_mq_desc =
        kmalloc(sizeof(struct gru_message_queue_desc), GFP_KERNEL);
    if (ch_uv->cached_notify_gru_mq_desc == NULL)
        return xpNoMemory;

    ret = xpc_allocate_send_msg_slot_uv(ch);
    if (ret == xpSuccess) {

        ret = xpc_allocate_recv_msg_slot_uv(ch);
        if (ret != xpSuccess) {
            kfree(ch_uv->send_msg_slots);
            xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
        }
    }
    return ret;
}
Developer ID: 020gzh, Project: linux, Lines of code: 28
Example 7: xpc_partition_up

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which setup the XPC
 * infrastructure will remain assigned to the partition until the partition
 * goes down. At which time the kthread will teardown the XPC infrastructure
 * and then exit.
 *
 * XPC HB will put the remote partition's XPC per partition specific variables
 * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
 * calling xpc_partition_up().
 */
static void
xpc_partition_up(struct xpc_partition *part)
{
    DBUG_ON(part->channels != NULL);

    dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));

    if (xpc_setup_infrastructure(part) != xpSuccess)
        return;

    /*
     * The kthread that XPC HB called us with will become the
     * channel manager for this partition. It will not return
     * back to XPC HB until the partition's XPC infrastructure
     * has been dismantled.
     */

    (void)xpc_part_ref(part);    /* this will always succeed */

    if (xpc_make_first_contact(part) == xpSuccess)
        xpc_channel_mgr(part);

    xpc_part_deref(part);

    xpc_teardown_infrastructure(part);
}
Developer ID: 274914765, Project: C, Lines of code: 41
Example 8: xpc_disconnect_channel

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
                       enum xp_retval reason, unsigned long *irq_flags)
{
    u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

    DBUG_ON(!spin_is_locked(&ch->lock));

    if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
        return;

    DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

    dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
            reason, line, ch->partid, ch->number);

    XPC_SET_REASON(ch, reason, line);

    ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
    /* some of these may not have been set */
    ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
                   XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
                   XPC_C_CONNECTING | XPC_C_CONNECTED);

    xpc_send_chctl_closerequest(ch, irq_flags);

    if (channel_was_connected)
        ch->flags |= XPC_C_WASCONNECTED;

    spin_unlock_irqrestore(&ch->lock, *irq_flags);

    /* wake all idle kthreads so they can exit */
    if (atomic_read(&ch->kthreads_idle) > 0) {
        wake_up_all(&ch->idle_wq);

    } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
               !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
        /* start a kthread that will do the xpDisconnecting callout */
        xpc_create_kthreads(ch, 1, 1);
    }

    /* wake those waiting to allocate an entry from the local msg queue */
    if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
        wake_up(&ch->msg_allocate_wq);

    spin_lock_irqsave(&ch->lock, *irq_flags);
}
Developer ID: johnny, Project: CobraDroidBeta, Lines of code: 56
Example 9: xpc_process_connect

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
    enum xp_retval ret;

    DBUG_ON(!spin_is_locked(&ch->lock));

    if (!(ch->flags & XPC_C_OPENREQUEST) ||
        !(ch->flags & XPC_C_ROPENREQUEST)) {
        /* nothing more to do for now */
        return;
    }
    DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

    if (!(ch->flags & XPC_C_SETUP)) {
        spin_unlock_irqrestore(&ch->lock, *irq_flags);
        ret = xpc_setup_msg_structures(ch);
        spin_lock_irqsave(&ch->lock, *irq_flags);

        if (ret != xpSuccess)
            XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

        ch->flags |= XPC_C_SETUP;

        if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
            return;
    }

    if (!(ch->flags & XPC_C_OPENREPLY)) {
        ch->flags |= XPC_C_OPENREPLY;
        xpc_send_chctl_openreply(ch, irq_flags);
    }

    if (!(ch->flags & XPC_C_ROPENREPLY))
        return;

    ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);    /* clear all else */

    dev_info(xpc_chan, "channel %d to partition %d connected\n",
             ch->number, ch->partid);

    spin_unlock_irqrestore(&ch->lock, *irq_flags);
    xpc_create_kthreads(ch, 1, 0);
    spin_lock_irqsave(&ch->lock, *irq_flags);
}
Developer ID: johnny, Project: CobraDroidBeta, Lines of code: 51
Example 10: xpc_save_remote_msgqueue_pa_uv

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
                               unsigned long gru_mq_desc_gpa)
{
    struct xpc_channel_uv *ch_uv = &ch->sn.uv;

    DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
    return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
                                           gru_mq_desc_gpa);
}
Developer ID: 020gzh, Project: linux, Lines of code: 10
Example 11: xpc_send_activate_IRQ_uv

static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg,
                         size_t msg_size, int msg_type)
{
    struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
    struct xpc_partition_uv *part_uv = &part->sn.uv;
    struct gru_message_queue_desc *gru_mq_desc;
    unsigned long irq_flags;
    enum xp_retval ret;

    DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

    msg_hdr->type = msg_type;
    msg_hdr->partid = xp_partition_id;
    msg_hdr->act_state = part->act_state;
    msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

    mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
    if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
        gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
        if (gru_mq_desc == NULL) {
            gru_mq_desc = kmalloc(sizeof(struct gru_message_queue_desc),
                                  GFP_KERNEL);
            if (gru_mq_desc == NULL) {
                ret = xpNoMemory;
                goto done;
            }
            part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
        }

        ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
                                              part_uv->activate_gru_mq_desc_gpa);
        if (ret != xpSuccess)
            goto done;

        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
        part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
    }

    /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
    ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
                           msg_size);
    if (ret != xpSuccess) {
        smp_rmb();    /* ensure a fresh copy of part_uv->flags */
        if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
            goto again;
    }
done:
    mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
    return ret;
}
Developer ID: 020gzh, Project: linux, Lines of code: 55
Example 12: xpc_initiate_send

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *   partid - ID of partition to which the channel is connected.
 *   ch_number - channel # to send message on.
 *   flags - see xp.h for valid flags.
 *   payload - pointer to the payload which is to be sent.
 *   payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
                  u16 payload_size)
{
    struct xpc_partition *part = &xpc_partitions[partid];
    enum xp_retval ret = xpUnknownReason;

    dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
            partid, ch_number);

    DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
    DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
    DBUG_ON(payload == NULL);

    if (xpc_part_ref(part)) {
        ret = xpc_send_payload(&part->channels[ch_number], flags,
                               payload, payload_size, 0, NULL, NULL);
        xpc_part_deref(part);
    }

    return ret;
}
Developer ID: johnny, Project: CobraDroidBeta, Lines of code: 40
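For a sense of the call side of example 12, a client with an already-connected channel could send a small payload as sketched below. The my_msg structure and the chosen partition and channel numbers are illustrative assumptions; XPC_NOWAIT is the non-blocking flag mentioned in the comment above.

/*
 * Hypothetical sender (sketch): push a small user-defined payload to
 * partition 1 on channel 0 without sleeping for a free message entry.
 */
struct my_msg {
    u32 opcode;
    u32 value;
};

static enum xp_retval
send_example(void)
{
    struct my_msg msg = { .opcode = 1, .value = 42 };

    return xpc_initiate_send(1, 0, XPC_NOWAIT, &msg, sizeof(msg));
}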
Example 13: xpc_connect

/*
 * Register for automatic establishment of a channel connection whenever
 * a partition comes up.
 *
 * Arguments:
 *
 *   ch_number - channel # to register for connection.
 *   func - function to call for asynchronous notification of channel
 *          state changes (i.e., connection, disconnection, error) and
 *          the arrival of incoming messages.
 *   key - pointer to optional user-defined value that gets passed back
 *         to the user on any callouts made to func.
 *   payload_size - size in bytes of the XPC message's payload area which
 *                  contains a user-defined message. The user should make
 *                  this large enough to hold their largest message.
 *   nentries - max #of XPC message entries a message queue can contain.
 *              The actual number, which is determined when a connection
 *              is established and may be less then requested, will be
 *              passed to the user via the xpcConnected callout.
 *   assigned_limit - max number of kthreads allowed to be processing
 *                    messages (per connection) at any given instant.
 *   idle_limit - max number of kthreads allowed to be idle at any given
 *                instant.
 */
enum xpc_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
            u16 nentries, u32 assigned_limit, u32 idle_limit)
{
    struct xpc_registration *registration;

    DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
    DBUG_ON(payload_size == 0 || nentries == 0);
    DBUG_ON(func == NULL);
    DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);

    registration = &xpc_registrations[ch_number];

    if (mutex_lock_interruptible(&registration->mutex) != 0) {
        return xpcInterrupted;
    }

    /* if XPC_CHANNEL_REGISTERED(ch_number) */
    if (registration->func != NULL) {
        mutex_unlock(&registration->mutex);
        return xpcAlreadyRegistered;
    }

    /* register the channel for connection */
    registration->msg_size = XPC_MSG_SIZE(payload_size);
    registration->nentries = nentries;
    registration->assigned_limit = assigned_limit;
    registration->idle_limit = idle_limit;
    registration->key = key;
    registration->func = func;

    mutex_unlock(&registration->mutex);

    xpc_interface.connect(ch_number);

    return xpcSuccess;
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines of code: 62
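Example 13 comes from an older XPC generation (enum xpc_retval, xpcConnected, and so on). A hedged sketch of what a client registration might look like is shown below; the xpc_channel_func parameter list follows the xp.h of that era but is an assumption, as are the channel number, payload size, and kthread limits.

/*
 * Hypothetical channel-state callback (sketch); the parameter list is an
 * assumed xpc_channel_func shape, not quoted on this page.
 */
static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
                void *data, void *key)
{
    if (reason == xpcConnected)
        pr_info("XPC channel %d to partition %d connected\n",
                ch_number, partid);
}

/*
 * Register channel 5 so it is connected automatically whenever a remote
 * partition comes up: 128-byte payloads, up to 64 message entries, at most
 * 4 kthreads processing messages and 2 idle.  All values are illustrative
 * but chosen to satisfy the DBUG_ON() checks in xpc_connect() above.
 */
static enum xpc_retval
register_example(void)
{
    return xpc_connect(5, my_channel_func, NULL, 128, 64, 4, 2);
}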
Example 14: xpc_disconnect_wait

void
xpc_disconnect_wait(int ch_number)
{
    unsigned long irq_flags;
    short partid;
    struct xpc_partition *part;
    struct xpc_channel *ch;
    int wakeup_channel_mgr;

    /* now wait for all callouts to the caller's function to cease */
    for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
        part = &xpc_partitions[partid];

        if (!xpc_part_ref(part))
            continue;

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_WDISCONNECT)) {
            xpc_part_deref(part);
            continue;
        }

        wait_for_completion(&ch->wdisconnect_wait);

        spin_lock_irqsave(&ch->lock, irq_flags);
        DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
        wakeup_channel_mgr = 0;

        if (ch->delayed_IPI_flags) {
            if (part->act_state != XPC_P_DEACTIVATING) {
                spin_lock(&part->IPI_lock);
                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
                                  ch->number, ch->delayed_IPI_flags);
                spin_unlock(&part->IPI_lock);
                wakeup_channel_mgr = 1;
            }
            ch->delayed_IPI_flags = 0;
        }

        ch->flags &= ~XPC_C_WDISCONNECT;
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (wakeup_channel_mgr)
            xpc_wakeup_channel_mgr(part);

        xpc_part_deref(part);
    }
}
Developer ID: 274914765, Project: C, Lines of code: 50
Example 15: xpc_process_activate_IRQ_rcvd_uv

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
    unsigned long irq_flags;
    short partid;
    struct xpc_partition *part;
    u8 act_state_req;

    DBUG_ON(xpc_activate_IRQ_rcvd == 0);

    spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
    for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
        part = &xpc_partitions[partid];

        if (part->sn.uv.act_state_req == 0)
            continue;

        xpc_activate_IRQ_rcvd--;
        BUG_ON(xpc_activate_IRQ_rcvd < 0);

        act_state_req = part->sn.uv.act_state_req;
        part->sn.uv.act_state_req = 0;
        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

        if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
            if (part->act_state == XPC_P_AS_INACTIVE)
                xpc_activate_partition(part);
            else if (part->act_state == XPC_P_AS_DEACTIVATING)
                XPC_DEACTIVATE_PARTITION(part, xpReactivating);

        } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
            if (part->act_state == XPC_P_AS_INACTIVE)
                xpc_activate_partition(part);
            else
                XPC_DEACTIVATE_PARTITION(part, xpReactivating);

        } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
            XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

        } else {
            BUG();
        }

        spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
        if (xpc_activate_IRQ_rcvd == 0)
            break;
    }
    spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
Developer ID: 020gzh, Project: linux, Lines of code: 50
Example 16: xpc_disconnect_wait

void
xpc_disconnect_wait(int ch_number)
{
    unsigned long irq_flags;
    short partid;
    struct xpc_partition *part;
    struct xpc_channel *ch;
    int wakeup_channel_mgr;

    /* */
    for (partid = 0; partid < xp_max_npartitions; partid++) {
        part = &xpc_partitions[partid];

        if (!xpc_part_ref(part))
            continue;

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_WDISCONNECT)) {
            xpc_part_deref(part);
            continue;
        }

        wait_for_completion(&ch->wdisconnect_wait);

        spin_lock_irqsave(&ch->lock, irq_flags);
        DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
        wakeup_channel_mgr = 0;

        if (ch->delayed_chctl_flags) {
            if (part->act_state != XPC_P_AS_DEACTIVATING) {
                spin_lock(&part->chctl_lock);
                part->chctl.flags[ch->number] |=
                    ch->delayed_chctl_flags;
                spin_unlock(&part->chctl_lock);
                wakeup_channel_mgr = 1;
            }
            ch->delayed_chctl_flags = 0;
        }

        ch->flags &= ~XPC_C_WDISCONNECT;
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (wakeup_channel_mgr)
            xpc_wakeup_channel_mgr(part);

        xpc_part_deref(part);
    }
}
Developer ID: romanbb, Project: android_kernel_lge_d851, Lines of code: 49
Example 17: xpc_notify_IRQ_handler

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not. If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *   irq - Interrupt ReQuest number. NOT USED.
 *   dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
    short partid = (short)(u64)dev_id;
    struct xpc_partition *part = &xpc_partitions[partid];

    DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

    if (xpc_part_ref(part)) {
        xpc_check_for_channel_activity(part);

        xpc_part_deref(part);
    }

    return IRQ_HANDLED;
}
Developer ID: 274914765, Project: C, Lines of code: 31
Example 18: xpc_teardown_msg_structures_uv

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
    struct xpc_channel_uv *ch_uv = &ch->sn.uv;

    DBUG_ON(!spin_is_locked(&ch->lock));

    kfree(ch_uv->cached_notify_gru_mq_desc);
    ch_uv->cached_notify_gru_mq_desc = NULL;

    if (ch->flags & XPC_C_SETUP) {
        xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
        kfree(ch_uv->send_msg_slots);
        xpc_init_fifo_uv(&ch_uv->recv_msg_list);
        kfree(ch_uv->recv_msg_slots);
    }
}
Developer ID: 020gzh, Project: linux, Lines of code: 21
Example 19: xpc_notify_senders_of_disconnect_uv

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
    struct xpc_send_msg_slot_uv *msg_slot;
    int entry;

    DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

    for (entry = 0; entry < ch->local_nentries; entry++) {

        if (atomic_read(&ch->n_to_notify) == 0)
            break;

        msg_slot = &ch->sn.uv.send_msg_slots[entry];
        if (msg_slot->func != NULL)
            xpc_notify_sender_uv(ch, msg_slot, ch->reason);
    }
}
Developer ID: 020gzh, Project: linux, Lines of code: 25
Note: The DBUG_ON examples in this article were collected from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not republish this article without permission.