This article collects typical usage examples of the C/C++ ACPI_FLUSH_CPU_CACHE macro. If you are wondering what ACPI_FLUSH_CPU_CACHE does, how it is called, or what real call sites look like, the 23 examples below — taken from open-source projects and ordered by popularity — should help.
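All of the examples reduce to the same pattern: write back and invalidate the CPU caches immediately before the processor or platform enters a state (C3, or a system sleep state) in which the hardware may stop snooping the caches. The sketch below is not taken from any of the examples; the wrapper function and its name are hypothetical, while ACPI_FLUSH_CPU_CACHE() (typically WBINVD on x86) and acpi_enter_sleep_state() are the real ACPICA interfaces that appear in the examples that follow (see Examples 2, 5 and 7).

#include <acpi/acpi.h>  /* ACPICA public API; defines ACPI_FLUSH_CPU_CACHE() */

/*
 * Hypothetical wrapper illustrating the common pattern: flush caches,
 * then hand control to the firmware/hardware. acpi_enter_sleep_state()
 * is normally called with interrupts disabled, after
 * acpi_enter_sleep_state_prep() has already run.
 */
static acpi_status example_enter_sleep(u8 sleep_state)
{
    acpi_status status;

    /* Dirty cache lines must reach memory before the caches stop being
     * snooped, as the ACPI specification requires. */
    ACPI_FLUSH_CPU_CACHE();

    status = acpi_enter_sleep_state(sleep_state);
    if (ACPI_FAILURE(status))
        return status;

    return AE_OK;
}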
Example 1: acpi_idle_play_dead

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
    struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

    ACPI_FLUSH_CPU_CACHE();

    while (1) {

        if (cx->entry_method == ACPI_CSTATE_HALT)
            safe_halt();
        else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
            inb(cx->address);
            /* See comment in acpi_idle_do_entry() */
            inl(acpi_gbl_FADT.xpm_timer_block.address);
        } else
            return -ENODEV;
    }

    /* Never reached */
    return 0;
}
Developer: FadyAzar, Project: linux-te-3.9, Lines: 26
Example 2: acpi_sleep_prepare

static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
    /* do we have a wakeup address for S2 and S3? */
    if (acpi_state == ACPI_STATE_S3) {
        if (!acpi_wakeup_address) {
            return -EFAULT;
        }
        acpi_set_firmware_waking_vector((acpi_physical_address)
                    virt_to_phys((void *)acpi_wakeup_address));
    }
    ACPI_FLUSH_CPU_CACHE();
    acpi_enable_wakeup_device_prep(acpi_state);
#endif
    printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
           acpi_state);
    acpi_enter_sleep_state_prep(acpi_state);
    return 0;
}
Developer: Tigrouzen, Project: k1099, Lines: 21
Example 3: acpi_idle_play_dead

static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
    struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

    ACPI_FLUSH_CPU_CACHE();

    while (1) {

        if (cx->entry_method == ACPI_CSTATE_HALT)
            safe_halt();
        else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
            inb(cx->address);
            inl(acpi_gbl_FADT.xpm_timer_block.address);
        } else
            return -ENODEV;
    }

    return 0;
}
Developer: DirtyDroidX, Project: android_kernel_htc_m8ul, Lines: 22
Example 4: acpi_idle_enter

static int acpi_idle_enter(struct cpuidle_device *dev,
                           struct cpuidle_driver *drv, int index)
{
    struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
    struct acpi_processor *pr;

    pr = __this_cpu_read(processors);
    if (unlikely(!pr))
        return -EINVAL;

    if (cx->type != ACPI_STATE_C1) {
        if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
            index = ACPI_IDLE_STATE_START;
            cx = per_cpu(acpi_cstate[index], dev->cpu);
        } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
            if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
                acpi_idle_enter_bm(pr, cx, true);
                return index;
            } else if (drv->safe_state_index >= 0) {
                index = drv->safe_state_index;
                cx = per_cpu(acpi_cstate[index], dev->cpu);
            } else {
                acpi_safe_halt();
                return -EBUSY;
            }
        }
    }

    lapic_timer_state_broadcast(pr, cx, 1);

    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    acpi_idle_do_entry(cx);

    lapic_timer_state_broadcast(pr, cx, 0);

    return index;
}
Developer: AlexShiLucky, Project: linux, Lines: 39
示例5: acpi_enter_sleep_state//.........这里部分代码省略......... /* Insert SLP_TYP bits */ PM1Acontrol |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); PM1Bcontrol |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); /* * We split the writes of SLP_TYP and SLP_EN to workaround * poorly implemented hardware. */ /* Write #1: fill in SLP_TYP data */ status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Insert SLP_ENABLE bit */ PM1Acontrol |= sleep_enable_reg_info->access_bit_mask; PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask; /* Write #2: SLP_TYP + SLP_EN */ ACPI_FLUSH_CPU_CACHE();#if !(defined(CONFIG_XEN) && defined(CONFIG_X86)) status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (sleep_state > ACPI_STATE_S3) { /* * We wanted to sleep > S3, but it didn't happen (by virtue of the * fact that we are still executing!) * * Wait ten seconds, then try again. This is to get S4/S5 to work on * all machines. * * We wait so long to allow chipsets that poll this reg very slowly to * still read the right value. Ideally, this block would go * away entirely. */ acpi_os_stall(10000000); status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL, sleep_enable_reg_info-> access_bit_mask); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } /* Wait until we enter sleep state */ do { status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, ACPI_MTX_DO_NOT_LOCK); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Spin until we wake */ } while (!in_value);#else /* PV ACPI just need check hypercall return value */ err = acpi_notify_hypervisor_state(sleep_state, PM1Acontrol, PM1Bcontrol); if (err) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hypervisor failure [%d]/n", err)); return_ACPI_STATUS(AE_ERROR); }#endif return_ACPI_STATUS(AE_OK);}
Developer: cywzl, Project: spice4xen, Lines: 101
示例6: acpi_hw_legacy_sleep//.........这里部分代码省略......... } acpi_gbl_system_awake_and_running = FALSE; status = acpi_hw_enable_all_wakeup_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Get current value of PM1A control */ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &pm1a_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Entering sleep state [S%u]/n", sleep_state)); /* Clear the SLP_EN and SLP_TYP fields */ pm1a_control &= ~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask); pm1b_control = pm1a_control; /* Insert the SLP_TYP bits */ pm1a_control |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); pm1b_control |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); /* * We split the writes of SLP_TYP and SLP_EN to workaround * poorly implemented hardware. */ /* Write #1: write the SLP_TYP data to the PM1 Control registers */ status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Insert the sleep enable (SLP_EN) bit */ pm1a_control |= sleep_enable_reg_info->access_bit_mask; pm1b_control |= sleep_enable_reg_info->access_bit_mask; /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); status = acpi_os_prepare_sleep(sleep_state, pm1a_control, pm1b_control); if (ACPI_SKIP(status)) return_ACPI_STATUS(AE_OK); if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); /* Write #2: Write both SLP_TYP + SLP_EN */ status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (sleep_state > ACPI_STATE_S3) { /* * We wanted to sleep > S3, but it didn't happen (by virtue of the * fact that we are still executing!) * * Wait ten seconds, then try again. This is to get S4/S5 to work on * all machines. * * We wait so long to allow chipsets that poll this reg very slowly * to still read the right value. Ideally, this block would go * away entirely. */ acpi_os_stall(10000000); status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL, sleep_enable_reg_info-> access_bit_mask); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } /* Wait for transition back to Working State */ do { status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } while (!in_value); return_ACPI_STATUS(AE_OK);}
Developer: Luwak, Project: linux, Lines: 101
示例7: acpi_enter_sleep_stateacpi_status asmlinkageacpi_enter_sleep_state ( u8 sleep_state){ u32 PM1Acontrol; u32 PM1Bcontrol; struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; u32 in_value; acpi_status status; ACPI_FUNCTION_TRACE ("acpi_enter_sleep_state"); if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) || (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) { ACPI_REPORT_ERROR (("Sleep values out of range: A=%X B=%X/n", acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b)); return_ACPI_STATUS (AE_AML_OPERAND_VALUE); } sleep_type_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_TYPE_A); sleep_enable_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_ENABLE); if (sleep_state != ACPI_STATE_S5) { /* Clear wake status */ status = acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } status = acpi_hw_clear_acpi_status (ACPI_MTX_DO_NOT_LOCK); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } /* Disable BM arbitration */ status = acpi_set_register (ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } } /* * 1) Disable all runtime GPEs * 2) Enable all wakeup GPEs */ status = acpi_hw_prepare_gpes_for_sleep (); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } /* Get current value of PM1A control */ status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Entering sleep state [S%d]/n", sleep_state)); /* Clear SLP_EN and SLP_TYP fields */ PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask); PM1Bcontrol = PM1Acontrol; /* Insert SLP_TYP bits */ PM1Acontrol |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); PM1Bcontrol |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); /* * We split the writes of SLP_TYP and SLP_EN to workaround * poorly implemented hardware. */ /* Write #1: fill in SLP_TYP data */ status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } /* Insert SLP_ENABLE bit */ PM1Acontrol |= sleep_enable_reg_info->access_bit_mask; PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask; /* Write #2: SLP_TYP + SLP_EN */ ACPI_FLUSH_CPU_CACHE (); status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol);//.........这里部分代码省略.........
Developer: iPodLinux, Project: linux-2.6.7-ipod, Lines: 101
Example 8: AcpiHwExtendedSleep

ACPI_STATUS
AcpiHwExtendedSleep (
    UINT8                   SleepState)
{
    ACPI_STATUS             Status;
    UINT8                   SleepTypeValue;
    UINT64                  SleepStatus;

    ACPI_FUNCTION_TRACE (HwExtendedSleep);

    /* Extended sleep registers must be valid */

    if (!AcpiGbl_FADT.SleepControl.Address ||
        !AcpiGbl_FADT.SleepStatus.Address)
    {
        return_ACPI_STATUS (AE_NOT_EXIST);
    }

    /* Clear wake status (WAK_STS) */

    Status = AcpiWrite ((UINT64) ACPI_X_WAKE_STATUS, &AcpiGbl_FADT.SleepStatus);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    AcpiGbl_SystemAwakeAndRunning = FALSE;

    /* Flush caches, as per ACPI specification */

    ACPI_FLUSH_CPU_CACHE ();

    /*
     * Set the SLP_TYP and SLP_EN bits.
     *
     * Note: We only use the first value returned by the \_Sx method
     * (AcpiGbl_SleepTypeA) - As per ACPI specification.
     */
    ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
        "Entering sleep state [S%u]\n", SleepState));

    SleepTypeValue = ((AcpiGbl_SleepTypeA << ACPI_X_SLEEP_TYPE_POSITION) &
        ACPI_X_SLEEP_TYPE_MASK);

    Status = AcpiWrite ((UINT64) (SleepTypeValue | ACPI_X_SLEEP_ENABLE),
        &AcpiGbl_FADT.SleepControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Wait for transition back to Working State */

    do
    {
        Status = AcpiRead (&SleepStatus, &AcpiGbl_FADT.SleepStatus);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }

    } while (!(((UINT8) SleepStatus) & ACPI_X_WAKE_STATUS));

    return_ACPI_STATUS (AE_OK);
}
Developer: 99corps, Project: runtime, Lines: 67
示例9: AcpiEnterSleepState//.........这里部分代码省略......... Arg.Type = ACPI_TYPE_INTEGER; Arg.Integer.Value = SleepState; Status = AcpiEvaluateObject (NULL, METHOD_NAME__GTS, &ArgList, NULL); if (ACPI_FAILURE (Status) && Status != AE_NOT_FOUND) { return_ACPI_STATUS (Status); } /* Get current value of PM1A control */ Status = AcpiHwRegisterRead (ACPI_REGISTER_PM1_CONTROL, &Pm1aControl); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Entering sleep state [S%d]/n", SleepState)); /* Clear the SLP_EN and SLP_TYP fields */ Pm1aControl &= ~(SleepTypeRegInfo->AccessBitMask | SleepEnableRegInfo->AccessBitMask); Pm1bControl = Pm1aControl; /* Insert the SLP_TYP bits */ Pm1aControl |= (AcpiGbl_SleepTypeA << SleepTypeRegInfo->BitPosition); Pm1bControl |= (AcpiGbl_SleepTypeB << SleepTypeRegInfo->BitPosition); /* * We split the writes of SLP_TYP and SLP_EN to workaround * poorly implemented hardware. */ /* Write #1: write the SLP_TYP data to the PM1 Control registers */ Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } /* Insert the sleep enable (SLP_EN) bit */ Pm1aControl |= SleepEnableRegInfo->AccessBitMask; Pm1bControl |= SleepEnableRegInfo->AccessBitMask; /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE (); /* Write #2: Write both SLP_TYP + SLP_EN */ Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } if (SleepState > ACPI_STATE_S3) { /* * We wanted to sleep > S3, but it didn't happen (by virtue of the * fact that we are still executing!) * * Wait ten seconds, then try again. This is to get S4/S5 to work on * all machines. * * We wait so long to allow chipsets that poll this reg very slowly * to still read the right value. Ideally, this block would go * away entirely. */ AcpiOsStall (10000000); Status = AcpiHwRegisterWrite (ACPI_REGISTER_PM1_CONTROL, SleepEnableRegInfo->AccessBitMask); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } } /* Wait until we enter sleep state */ do { Status = AcpiReadBitRegister (ACPI_BITREG_WAKE_STATUS, &InValue); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } /* Spin until we wake */ } while (!InValue); return_ACPI_STATUS (AE_OK);}
Developer: CoryXie, Project: BarrelfishOS, Lines: 101
示例10: acpi_cst_idle/* * Idle the CPU in the lowest state possible. This function is called with * interrupts disabled. Note that once it re-enables interrupts, a task * switch can occur so do not access shared data (i.e. the softc) after * interrupts are re-enabled. */static voidacpi_cst_idle(void){ struct acpi_cst_softc *sc; struct acpi_cst_cx *cx_next; union microtime_pcpu start, end; int cx_next_idx, i, tdiff, bm_arb_disabled = 0; /* If disabled, return immediately. */ if (acpi_cst_disable_idle) { ACPI_ENABLE_IRQS(); return; } /* * Look up our CPU id to get our softc. If it's NULL, we'll use C1 * since there is no Cx state for this processor. */ sc = acpi_cst_softc[mdcpu->mi.gd_cpuid]; if (sc == NULL) { acpi_cst_c1_halt(); return; } /* Still probing; use C1 */ if (sc->cst_flags & ACPI_CST_FLAG_PROBING) { acpi_cst_c1_halt(); return; } /* Find the lowest state that has small enough latency. */ cx_next_idx = 0; for (i = sc->cst_cx_lowest; i >= 0; i--) { if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) { cx_next_idx = i; break; } } /* * Check for bus master activity if needed for the selected state. * If there was activity, clear the bit and use the lowest non-C3 state. */ cx_next = &sc->cst_cx_states[cx_next_idx]; if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) { int bm_active; AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active); if (bm_active != 0) { AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); cx_next_idx = sc->cst_non_c3; } } /* Select the next state and update statistics. */ cx_next = &sc->cst_cx_states[cx_next_idx]; sc->cst_cx_stats[cx_next_idx]++; KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep")); /* * Execute HLT (or equivalent) and wait for an interrupt. We can't * calculate the time spent in C1 since the place we wake up is an * ISR. Assume we slept half of quantum and return. */ if (cx_next->type == ACPI_STATE_C1) { sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4; cx_next->enter(cx_next); return; } /* Execute the proper preamble before enter the selected state. */ if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) { AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); bm_arb_disabled = 1; } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) { ACPI_FLUSH_CPU_CACHE(); } /* * Enter the selected state and check time spent asleep. */ microtime_pcpu_get(&start); cpu_mfence(); cx_next->enter(cx_next); cpu_mfence(); microtime_pcpu_get(&end); /* Enable bus master arbitration, if it was disabled. */ if (bm_arb_disabled) AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); ACPI_ENABLE_IRQS();//.........这里部分代码省略.........
Developer: wan721, Project: DragonFlyBSD, Lines: 101
示例11: acpi_idle_enter_bmstatic int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index){ struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; if (!cx->bm_sts_skip && acpi_idle_bm_check()) { if (drv->safe_state_index >= 0) { return drv->states[drv->safe_state_index].enter(dev, drv, drv->safe_state_index); } else { local_irq_disable(); acpi_safe_halt(); local_irq_enable(); return -EINVAL; } } local_irq_disable(); if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; smp_mb(); if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return -EINVAL; } } acpi_unlazy_tlb(smp_processor_id()); sched_clock_idle_sleep_event(); lapic_timer_state_broadcast(pr, cx, 1); kt1 = ktime_get_real(); if (pr->flags.bm_check && pr->flags.bm_control) { raw_spin_lock(&c3_lock); c3_cpu_count++; if (c3_cpu_count == num_online_cpus()) acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); raw_spin_unlock(&c3_lock); } else if (!pr->flags.bm_check) { ACPI_FLUSH_CPU_CACHE(); } acpi_idle_do_entry(cx); if (pr->flags.bm_check && pr->flags.bm_control) { raw_spin_lock(&c3_lock); acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_count--; raw_spin_unlock(&c3_lock); } kt2 = ktime_get_real(); idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); dev->last_residency = (int)idle_time; sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; return index;}
Developer: DirtyDroidX, Project: android_kernel_htc_m8ul, Lines: 90
示例12: acpicpu_cstate_idle/* * The main idle loop. */voidacpicpu_cstate_idle(void){ struct cpu_info *ci = curcpu(); struct acpicpu_softc *sc; int state; KASSERT(acpicpu_sc != NULL); KASSERT(ci->ci_acpiid < maxcpus); sc = acpicpu_sc[ci->ci_acpiid]; if (__predict_false(sc == NULL)) return; KASSERT(ci->ci_ilevel == IPL_NONE); KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0); if (__predict_false(sc->sc_cold != false)) return; if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0)) return; state = acpicpu_cstate_latency(sc); mutex_exit(&sc->sc_mtx); /* * Apply AMD C1E quirk. */ if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0) acpicpu_md_quirk_c1e(); /* * Check for bus master activity. Note that particularly usb(4) * causes high activity, which may prevent the use of C3 states. */ if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) { if (acpicpu_cstate_bm_check() != false) state--; if (__predict_false(sc->sc_cstate[state].cs_method == 0)) state = ACPI_STATE_C1; } KASSERT(state != ACPI_STATE_C0); if (state != ACPI_STATE_C3) { acpicpu_cstate_idle_enter(sc, state); return; } /* * On all recent (Intel) CPUs caches are shared * by CPUs and bus master control is required to * keep these coherent while in C3. Flushing the * CPU caches is only the last resort. */ if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0) ACPI_FLUSH_CPU_CACHE(); /* * Allow the bus master to request that any given * CPU should return immediately to C0 from C3. */ if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0) (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); /* * It may be necessary to disable bus master arbitration * to ensure that bus master cycles do not occur while * sleeping in C3 (see ACPI 4.0, section 8.1.4). */ if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0) (void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); acpicpu_cstate_idle_enter(sc, state); /* * Disable bus master wake and re-enable the arbiter. */ if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0) (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0) (void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);}
Developer: eyberg, Project: rumpkernel-netbsd-src, Lines: 91
示例13: acpi_idle_enter_bm/** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU * @state: the state data * * If BM is detected, the deepest non-C3 idle state is entered instead. */static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_state *state){ struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); u32 t1, t2; int sleep_ticks = 0; pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; if (acpi_idle_suspend) return(acpi_idle_enter_c1(dev, state)); if (acpi_idle_bm_check()) { if (dev->safe_state) { dev->last_state = dev->safe_state; return dev->safe_state->enter(dev, dev->safe_state); } else { local_irq_disable(); acpi_safe_halt(); local_irq_enable(); return 0; } } local_irq_disable(); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we test * NEED_RESCHED: */ smp_mb(); if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return 0; } acpi_unlazy_tlb(smp_processor_id()); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); /* * Must be done before busmaster disable as we might need to * access HPET ! */ acpi_state_timer_broadcast(pr, cx, 1); /* * disable bus master * bm_check implies we need ARB_DIS * !bm_check implies we need cache flush * bm_control implies whether we can do ARB_DIS * * That leaves a case where bm_check is set and bm_control is * not set. In that case we cannot do much, we enter C3 * without doing anything. */ if (pr->flags.bm_check && pr->flags.bm_control) { spin_lock(&c3_lock); c3_cpu_count++; /* Disable bus master arbitration when all CPUs are in C3 */ if (c3_cpu_count == num_online_cpus()) acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); spin_unlock(&c3_lock); } else if (!pr->flags.bm_check) { ACPI_FLUSH_CPU_CACHE(); } t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); acpi_idle_do_entry(cx); t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); /* Re-enable bus master arbitration */ if (pr->flags.bm_check && pr->flags.bm_control) { spin_lock(&c3_lock); acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_count--; spin_unlock(&c3_lock); }#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) /* TSC could halt in idle, so notify users */ if (tsc_halts_in_c(ACPI_STATE_C3)) mark_tsc_unstable("TSC halts in idle");#endif sleep_ticks = ticks_elapsed(t1, t2); /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);//.........这里部分代码省略.........
Developer: 10x-Amin, Project: x10_Th_kernel, Lines: 101
示例14: acpi_idle_enter_simple/** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU * @state: the state data */static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct cpuidle_state *state){ struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); u32 t1, t2; int sleep_ticks = 0; pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; if (acpi_idle_suspend) return(acpi_idle_enter_c1(dev, state)); local_irq_disable(); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we test * NEED_RESCHED: */ smp_mb(); if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return 0; } /* * Must be done before busmaster disable as we might need to * access HPET ! */ acpi_state_timer_broadcast(pr, cx, 1); if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); acpi_idle_do_entry(cx); t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) /* TSC could halt in idle, so notify users */ if (tsc_halts_in_c(cx->type)) mark_tsc_unstable("TSC halts in idle");;#endif sleep_ticks = ticks_elapsed(t1, t2); /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); local_irq_enable(); current_thread_info()->status |= TS_POLLING; cx->usage++; acpi_state_timer_broadcast(pr, cx, 0); cx->time += sleep_ticks; return ticks_elapsed_in_us(t1, t2);}
Developer: 10x-Amin, Project: x10_Th_kernel, Lines: 69
示例15: acpi_hw_legacy_sleepacpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags){ struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; u32 pm1a_control; u32 pm1b_control; u32 in_value; acpi_status status; ACPI_FUNCTION_TRACE(hw_legacy_sleep); sleep_type_reg_info = acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE); sleep_enable_reg_info = acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE); status = acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_hw_clear_acpi_status(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (sleep_state != ACPI_STATE_S5) { status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) { return_ACPI_STATUS(status); } } status = acpi_hw_disable_all_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } acpi_gbl_system_awake_and_running = FALSE; status = acpi_hw_enable_all_wakeup_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (flags & ACPI_EXECUTE_GTS) { acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); } status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &pm1a_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Entering sleep state [S%u]/n", sleep_state)); pm1a_control &= ~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask); pm1b_control = pm1a_control; pm1a_control |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); pm1b_control |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } pm1a_control |= sleep_enable_reg_info->access_bit_mask; pm1b_control |= sleep_enable_reg_info->access_bit_mask; ACPI_FLUSH_CPU_CACHE(); status = acpi_os_prepare_sleep(sleep_state, pm1a_control, pm1b_control); if (ACPI_SKIP(status)) return_ACPI_STATUS(AE_OK); if (ACPI_FAILURE(status))//.........这里部分代码省略.........
Developer: DirtyDroidX, Project: android_kernel_htc_m8ul, Lines: 101
示例16: acpi_idle_enter_simple/** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU * @index: the index of suggested state */static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index){ struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ktime_t kt1, kt2; s64 idle_time; s64 sleep_ticks = 0; pr = __get_cpu_var(processors); dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; local_irq_disable(); if (acpi_idle_suspend) { local_irq_enable(); cpu_relax(); return -EBUSY; } if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we test * NEED_RESCHED: */ smp_mb(); } if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); return -EINVAL; } /* * Must be done before busmaster disable as we might need to * access HPET ! */ lapic_timer_state_broadcast(pr, cx, 1); if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); kt1 = ktime_get_real(); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); acpi_idle_do_entry(cx); kt2 = ktime_get_real(); idle_time = ktime_to_us(ktime_sub(kt2, kt1)); sleep_ticks = us_to_pm_timer_ticks(idle_time); /* Update device last_residency*/ dev->last_residency = (int)idle_time; /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); local_irq_enable(); current_thread_info()->status |= TS_POLLING; cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; return index;}
Developer: seyko2, Project: openvz_rhel6_kernel_mirror, Lines: 76
示例17: acpi_cpu_idle/* * Idle the CPU in the lowest state possible. This function is called with * interrupts disabled. Note that once it re-enables interrupts, a task * switch can occur so do not access shared data (i.e. the softc) after * interrupts are re-enabled. */static voidacpi_cpu_idle(){ struct acpi_cpu_softc *sc; struct acpi_cx *cx_next; uint32_t start_time, end_time; int bm_active, cx_next_idx, i; /* If disabled, return immediately. */ if (cpu_disable_idle) { ACPI_ENABLE_IRQS(); return; } /* * Look up our CPU id to get our softc. If it's NULL, we'll use C1 * since there is no ACPI processor object for this CPU. This occurs * for logical CPUs in the HTT case. */ sc = cpu_softc[PCPU_GET(cpuid)]; if (sc == NULL) { acpi_cpu_c1(); return; } /* Find the lowest state that has small enough latency. */ cx_next_idx = 0; for (i = sc->cpu_cx_lowest; i >= 0; i--) { if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) { cx_next_idx = i; break; } } /* * Check for bus master activity. If there was activity, clear * the bit and use the lowest non-C3 state. Note that the USB * driver polling for new devices keeps this bit set all the * time if USB is loaded. */ if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active); if (bm_active != 0) { AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); cx_next_idx = min(cx_next_idx, sc->cpu_non_c3); } } /* Select the next state and update statistics. */ cx_next = &sc->cpu_cx_states[cx_next_idx]; sc->cpu_cx_stats[cx_next_idx]++; KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep")); /* * Execute HLT (or equivalent) and wait for an interrupt. We can't * calculate the time spent in C1 since the place we wake up is an * ISR. Assume we slept half of quantum and return. */ if (cx_next->type == ACPI_STATE_C1) { sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4; acpi_cpu_c1(); return; } /* * For C3, disable bus master arbitration and enable bus master wake * if BM control is available, otherwise flush the CPU cache. */ if (cx_next->type == ACPI_STATE_C3) { if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); } else ACPI_FLUSH_CPU_CACHE(); } /* * Read from P_LVLx to enter C2(+), checking time spent asleep. * Use the ACPI timer for measuring sleep time. Since we need to * get the time very close to the CPU start/stop clock logic, this * is the only reliable time source. */ AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock); CPU_GET_REG(cx_next->p_lvlx, 1); /* * Read the end time twice. Since it may take an arbitrary time * to enter the idle state, the first read may be executed before * the processor has stopped. Doing it again provides enough * margin that we are certain to have a correct value. */ AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);//.........这里部分代码省略.........
Developer: DangerDexter, Project: FreeBSD-8.0-dyntick, Lines: 101
示例18: acpi_enter_sleep_state//.........这里部分代码省略......... if (gts) { /* Execute the _GTS method */ arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = sleep_state; status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { return_ACPI_STATUS(status); } } /* Get current value of PM1A control */ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &pm1a_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Entering sleep state [S%u]/n", sleep_state)); /* Clear the SLP_EN and SLP_TYP fields */ pm1a_control &= ~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask); pm1b_control = pm1a_control; /* Insert the SLP_TYP bits */ pm1a_control |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); pm1b_control |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); /* Write #1: write the SLP_TYP data to the PM1 Control registers */ status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Insert the sleep enable (SLP_EN) bit */ pm1a_control |= sleep_enable_reg_info->access_bit_mask; pm1b_control |= sleep_enable_reg_info->access_bit_mask; /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); tboot_sleep(sleep_state, pm1a_control, pm1b_control); /* Write #2: Write both SLP_TYP + SLP_EN */ status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (sleep_state > ACPI_STATE_S3) { /* * We wanted to sleep > S3, but it didn't happen (by virtue of the * fact that we are still executing!) * * Wait ten seconds, then try again. This is to get S4/S5 to work on * all machines. * * We wait so long to allow chipsets that poll this reg very slowly * to still read the right value. Ideally, this block would go * away entirely. */ acpi_os_stall(10000000); status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL, sleep_enable_reg_info-> access_bit_mask); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } /* Wait until we enter sleep state */ do { status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Spin until we wake */ } while (!in_value); return_ACPI_STATUS(AE_OK);}
Developer: 1703011, Project: asuswrt-merlin, Lines: 101
示例19: acpi_hw_extended_sleepacpi_status acpi_hw_extended_sleep(u8 sleep_state){ acpi_status status; u8 sleep_control; u64 sleep_status; ACPI_FUNCTION_TRACE(hw_extended_sleep); /* Extended sleep registers must be valid */ if (!acpi_gbl_FADT.sleep_control.address || !acpi_gbl_FADT.sleep_status.address) { return_ACPI_STATUS(AE_NOT_EXIST); } /* Clear wake status (WAK_STS) */ status = acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } acpi_gbl_system_awake_and_running = FALSE; /* * Set the SLP_TYP and SLP_EN bits. * * Note: We only use the first value returned by the /_Sx method * (acpi_gbl_sleep_type_a) - As per ACPI specification. */ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Entering sleep state [S%u]/n", sleep_state)); sleep_control = ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE; /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); status = acpi_os_enter_sleep(sleep_state, sleep_control, 0); if (status == AE_CTRL_TERMINATE) { return_ACPI_STATUS(AE_OK); } if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_write((u64)sleep_control, &acpi_gbl_FADT.sleep_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Wait for transition back to Working State */ do { status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS)); return_ACPI_STATUS(AE_OK);}
Developer: AlexShiLucky, Project: linux, Lines: 66
示例20: enter_state/* Main interface to do xen specific suspend/resume */static int enter_state(u32 state){ unsigned long flags; int error; unsigned long cr4; if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) ) return -EINVAL; if ( !spin_trylock(&pm_lock) ) return -EBUSY; BUG_ON(system_state != SYS_STATE_active); system_state = SYS_STATE_suspend; printk(XENLOG_INFO "Preparing system for ACPI S%d state./n", state); freeze_domains(); acpi_dmar_reinstate(); if ( (error = disable_nonboot_cpus()) ) { system_state = SYS_STATE_resume; goto enable_cpu; } cpufreq_del_cpu(0); hvm_cpu_down(); acpi_sleep_prepare(state); console_start_sync(); printk("Entering ACPI S%d state./n", state); local_irq_save(flags); spin_debug_disable(); if ( (error = device_power_down()) ) { printk(XENLOG_ERR "Some devices failed to power down."); system_state = SYS_STATE_resume; goto done; } ACPI_FLUSH_CPU_CACHE(); switch ( state ) { case ACPI_STATE_S3: do_suspend_lowlevel(); system_reset_counter++; error = tboot_s3_resume(); break; case ACPI_STATE_S5: acpi_enter_sleep_state(ACPI_STATE_S5); break; default: error = -EINVAL; break; } system_state = SYS_STATE_resume; /* Restore CR4 and EFER from cached values. */ cr4 = read_cr4(); write_cr4(cr4 & ~X86_CR4_MCE); write_efer(read_efer()); device_power_up(); mcheck_init(&boot_cpu_data, 0); write_cr4(cr4); printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state./n", state); if ( (state == ACPI_STATE_S3) && error ) tboot_s3_error(error); done: spin_debug_enable(); local_irq_restore(flags); console_end_sync(); acpi_sleep_post(state); if ( hvm_cpu_up() ) BUG(); enable_cpu: cpufreq_add_cpu(0); microcode_resume_cpu(0); rcu_barrier(); mtrr_aps_sync_begin(); enable_nonboot_cpus(); mtrr_aps_sync_end(); adjust_vtd_irq_affinities(); acpi_dmar_zap(); thaw_domains(); system_state = SYS_STATE_active;//.........这里部分代码省略.........
Developer: lwhibernate, Project: xen, Lines: 101
示例21: enter_state/* Main interface to do xen specific suspend/resume */static int enter_state(u32 state){ unsigned long flags; int error; if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) ) return -EINVAL; if ( !spin_trylock(&pm_lock) ) return -EBUSY; printk(XENLOG_INFO "Preparing system for ACPI S%d state.", state); freeze_domains(); disable_nonboot_cpus(); if ( num_online_cpus() != 1 ) { error = -EBUSY; goto enable_cpu; } cpufreq_del_cpu(0); hvm_cpu_down(); acpi_sleep_prepare(state); console_start_sync(); printk("Entering ACPI S%d state./n", state); local_irq_save(flags); spin_debug_disable(); if ( (error = device_power_down()) ) { printk(XENLOG_ERR "Some devices failed to power down."); goto done; } ACPI_FLUSH_CPU_CACHE(); switch ( state ) { case ACPI_STATE_S3: do_suspend_lowlevel(); system_reset_counter++; error = tboot_s3_resume(); break; case ACPI_STATE_S5: acpi_enter_sleep_state(ACPI_STATE_S5); break; default: error = -EINVAL; break; } /* Restore CR4 and EFER from cached values. */ write_cr4(read_cr4()); if ( cpu_has_efer ) write_efer(read_efer()); device_power_up(); printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state./n", state); if ( (state == ACPI_STATE_S3) && error ) panic("Memory integrity was lost on resume (%d)/n", error); done: spin_debug_enable(); local_irq_restore(flags); console_end_sync(); acpi_sleep_post(state); if ( !hvm_cpu_up() ) BUG(); enable_cpu: cpufreq_add_cpu(0); microcode_resume_cpu(0); enable_nonboot_cpus(); thaw_domains(); spin_unlock(&pm_lock); return error;}
Developer: a2k2, Project: xen-unstable, Lines: 86
示例22: acpi_processor_idle//.........这里部分代码省略......... /* Re-enable interrupts */ local_irq_enable(); break; } case ACPI_STATE_C3: /* * Before invoking C3, be aware that TSC/APIC timer may be * stopped by H/W. Without carefully handling of TSC/APIC stop issues, * deep C state can't work correctly. */ /* preparing APIC stop */ lapic_timer_off(); /* Get start time (ticks) */ t1 = get_tick(); /* Trace cpu idle entry */ TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred); /* * disable bus master * bm_check implies we need ARB_DIS * !bm_check implies we need cache flush * bm_control implies whether we can do ARB_DIS * * That leaves a case where bm_check is set and bm_control is * not set. In that case we cannot do much, we enter C3 * without doing anything. */ if ( cx->type != ACPI_STATE_C3 ) /* nothing to be done here */; else if ( power->flags.bm_check && power->flags.bm_control ) { spin_lock(&c3_cpu_status.lock); if ( ++c3_cpu_status.count == num_online_cpus() ) { /* * All CPUs are trying to go to C3 * Disable bus master arbitration */ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); } spin_unlock(&c3_cpu_status.lock); } else if ( !power->flags.bm_check ) { /* SMP with no shared cache... Invalidate cache */ ACPI_FLUSH_CPU_CACHE(); } /* Invoke C3 */ acpi_idle_do_entry(cx); if ( (cx->type == ACPI_STATE_C3) && power->flags.bm_check && power->flags.bm_control ) { /* Enable bus master arbitration */ spin_lock(&c3_cpu_status.lock); acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_status.count--; spin_unlock(&c3_cpu_status.lock); } /* Get end time (ticks) */ t2 = get_tick(); /* recovering TSC */ cstate_restore_tsc(); trace_exit_reason(irq_traced); /* Trace cpu idle exit */ TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2, irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]); /* Update statistics */ acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2)); /* Re-enable interrupts */ local_irq_enable(); /* recovering APIC */ lapic_timer_on(); break; default: /* Now in C0 */ power->last_state = &power->states[0]; local_irq_enable(); sched_tick_resume(); cpufreq_dbs_timer_resume(); return; } /* Now in C0 */ power->last_state = &power->states[0]; sched_tick_resume(); cpufreq_dbs_timer_resume(); if ( cpuidle_current_governor->reflect ) cpuidle_current_governor->reflect(power);}
Developer: Ultra-Seven, Project: gxen, Lines: 101
示例23: acpi_idle_enter_bm/** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU * @drv: cpuidle driver containing state data * @index: the index of suggested state * * If BM is detected, the deepest non-C3 idle state is entered instead. */static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index){ struct acpi_processor *pr; struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); if (unlikely(!pr)) return -EINVAL; if (!cx->bm_sts_skip && acpi_idle_bm_check()) { if (drv->safe_state_index >= 0) { return drv->states[drv->safe_state_index].enter(dev, drv, drv->safe_state_index); } else { acpi_safe_halt(); return -EBUSY; } } if (cx->entry_method == ACPI_CSTATE_FFH) { if (current_set_polling_and_test()) return -EINVAL; } acpi_unlazy_tlb(smp_processor_id()); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); /* * Must be done before busmaster disable as we might need to * access HPET ! */ lapic_timer_state_broadcast(pr, cx, 1); /* * disable bus master * bm_check implies we need ARB_DIS * !bm_check implies we need cache flush * bm_control implies whether we can do ARB_DIS * * That leaves a case where bm_check is set and bm_control is * not set. In that case we cannot do much, we enter C3 * without doing anything. */ if (pr->flags.bm_check && pr->flags.bm_control) { raw_spin_lock(&c3_lock); c3_cpu_count++; /* Disable bus master arbitration when all CPUs are in C3 */ if (c3_cpu_count == num_online_cpus()) acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); raw_spin_unlock(&c3_lock); } else if (!pr->flags.bm_check) { ACPI_FLUSH_CPU_CACHE(); } acpi_idle_do_entry(cx); /* Re-enable bus master arbitration */ if (pr->flags.bm_check && pr->flags.bm_control) { raw_spin_lock(&c3_lock); acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_count--; raw_spin_unlock(&c3_lock); } sched_clock_idle_wakeup_event(0); lapic_timer_state_broadcast(pr, cx, 0); return index;}
Developer: AndroidGX, Project: SimpleGX-L-5.0.2_BOD6_G901F, Lines: 80
Note: the ACPI_FLUSH_CPU_CACHE examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow each project's license. Please do not repost without permission.