这篇教程C++ write_lock_irq函数代码示例写得很实用,希望能帮到您。
本文整理汇总了C++中write_lock_irq函数的典型用法代码示例。如果您正苦于以下问题:C++ write_lock_irq函数的具体用法?C++ write_lock_irq怎么用?C++ write_lock_irq使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。 在下文中一共展示了write_lock_irq函数的23个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。 示例1: get_device
/**
 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
 * @port: pointer to port where unit is added
 * @fcp_lun: FCP LUN of unit to be enqueued
 * Returns: pointer to enqueued unit on success, ERR_PTR on error
 *
 * Sets up some unit internal structures and creates sysfs entry.
 */
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;
	int retval = -ENOMEM;

	/* Hold the port while the unit is being set up. */
	get_device(&port->dev);

	/* Refuse to enqueue the same LUN twice. */
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit) {
		put_device(&unit->dev);
		retval = -EEXIST;
		goto err_out;
	}

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit)
		goto err_out;

	unit->port = port;
	unit->fcp_lun = fcp_lun;
	unit->dev.parent = &port->dev;
	unit->dev.release = zfcp_unit_release;

	if (dev_set_name(&unit->dev, "0x%016llx",
			 (unsigned long long) fcp_lun)) {
		kfree(unit);
		goto err_out;
	}
	retval = -EINVAL;

	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);

	spin_lock_init(&unit->latencies.lock);
	/* Seed latency minima so the first sample always wins. */
	unit->latencies.write.channel.min = 0xFFFFFFFF;
	unit->latencies.write.fabric.min = 0xFFFFFFFF;
	unit->latencies.read.channel.min = 0xFFFFFFFF;
	unit->latencies.read.fabric.min = 0xFFFFFFFF;
	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;

	if (device_register(&unit->dev)) {
		/* device_register failure: drop the embedded reference. */
		put_device(&unit->dev);
		goto err_out;
	}

	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
		goto err_out_put;

	write_lock_irq(&port->unit_list_lock);
	list_add_tail(&unit->list, &port->unit_list);
	write_unlock_irq(&port->unit_list_lock);

	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);

	return unit;

err_out_put:
	device_unregister(&unit->dev);
err_out:
	put_device(&port->dev);
	return ERR_PTR(retval);
}
开发者ID:A2109devs,项目名称:lenovo_a2109a_kernel,代码行数:70,
示例2: ptrace_attach
static int ptrace_attach(struct task_struct *task)
{
	bool wait_trap = false;
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	/* Kernel threads and our own thread group may not be traced. */
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (task_ns_capable(task, CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task)) {
		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
		signal_wake_up(task, 1);
		wait_trap = true;
	}

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (wait_trap)
		wait_event(current->signal->wait_chldexit,
			   !(task->group_stop & GROUP_STOP_TRAPPING));
	return retval;
}
开发者ID:1yankeedt,项目名称:D710BST_FL24_Kernel,代码行数:80,
示例3: masq_wait
/*-------------------------------------------------------------------------
 * bproc_masq_wait
 *
 * Remote wait() for a masqueraded (bproc) process: forwards the wait
 * to the master node and, if the reaped child turns out to be local,
 * reaps it locally as well.  @infop is currently unused.
 */
int masq_wait(pid_t pid, int options, struct siginfo *infop,
	      unsigned int *stat_addr, struct rusage *ru)
{
	int result, lpid, status;
	struct bproc_krequest_t *req;
	struct bproc_rsyscall_msg_t *msg;
	struct bproc_wait_resp_t *resp_msg;
	struct task_struct *child;

	/* XXX to be 100% semantically correct, we need to verify_area
	 * here on stat_addr and ru here... */

	req = bpr_rsyscall1(BPROC_SYS_WAIT);
	if (!req) {
		/* scrape artifact fixed: "/n" restored to "\n" */
		printk("bproc: masq: sys_wait: out of memory.\n");
		return -ENOMEM;
	}
	msg = (struct bproc_rsyscall_msg_t *) bproc_msg(req);
	msg->arg[0] = pid;
	msg->arg[1] = options;
	if (bpr_rsyscall2(BPROC_MASQ_MASTER(current), req, 1) != 0) {
		bproc_put_req(req);
		return -EIO;
	}

	resp_msg = bproc_msg(req->response);
	result = resp_msg->hdr.result;
	status = resp_msg->status;

	if (stat_addr)
		put_user(status, stat_addr);
	if (ru) {
		/* Only a few elements of the rusage structure are provided by
		 * Linux.  Also, convert the times back to the HZ
		 * representation. */
		struct rusage ru_tmp;
		memset(&ru_tmp, 0, sizeof(ru_tmp));
		ru_tmp.ru_utime.tv_sec  = resp_msg->utime/HZ;
		ru_tmp.ru_utime.tv_usec = (resp_msg->utime*(1000000/HZ))%1000000;
		ru_tmp.ru_stime.tv_sec  = resp_msg->stime/HZ;
		ru_tmp.ru_stime.tv_usec = (resp_msg->stime*(1000000/HZ))%1000000;
		ru_tmp.ru_minflt = resp_msg->minflt;
		ru_tmp.ru_majflt = resp_msg->majflt;
		ru_tmp.ru_nswap  = resp_msg->nswap;
		copy_to_user(ru, &ru_tmp, sizeof(ru_tmp));
	}
	bproc_put_req(req);

	if (result > 0 && (status & 0xff) != 0x7f) {
		/* It's possible that the process we waited on was actually
		 * local.  We need to make sure and get it out of this process
		 * tree too.  If it's not, we need to down the non local child
		 * count by one...
		 */
		write_lock_irq(&tasklist_lock);
		child = masq_find_task_by_pid(BPROC_MASQ_MASTER(current), result);
		if (child)
			lpid = child->pid;
		else {
			lpid = 0;
			current->bproc.nlchild--;
		}
		write_unlock_irq(&tasklist_lock);

		if (lpid) {
			/* Do all of this as a kernel call to avoid re-entering
			 * this whole mess...
			 * scrape artifact fixed: "&curren;t" restored to
			 * "&current" */
			set_bit(BPROC_FLAG_KCALL, &current->bproc.flag);
			if (k_wait4(lpid, options & ~WNOHANG, NULL, NULL, NULL) == -1) {
				printk(KERN_ERR "bproc: masq: local wait failed on %d (%d)\n",
				       lpid, result);
#if 0
				/* This probably isn't correct.  If we fail to wait,
				 * that probably means that somebody else picked it
				 * up. */
				write_lock_irq(&tasklist_lock);
				current->bproc.nlchild--;
				write_unlock_irq(&tasklist_lock);
#endif
			}
		}
	}
	return result;
}
开发者ID:dwrudis,项目名称:clustermatic,代码行数:83,
示例4: exit_notify
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(void)
{
	struct task_struct *p, *t;

	forget_original_parent(current);

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */
	t = current->p_pptr;
	if ((t->pgrp != current->pgrp) &&
	    (t->session == current->session) &&
	    will_become_orphaned_pgrp(current->pgrp, current) &&
	    has_stopped_jobs(current->pgrp)) {
		kill_pg(current->pgrp, SIGHUP, 1);
		kill_pg(current->pgrp, SIGCONT, 1);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (current->exit_signal != SIGCHLD &&
	    (current->parent_exec_id != t->self_exec_id ||
	     current->self_exec_id != current->parent_exec_id) &&
	    !capable(CAP_KILL))
		current->exit_signal = SIGCHLD;

	notify_parent(current, current->exit_signal);

	/*
	 * This loop does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	write_lock_irq(&tasklist_lock);
	while (current->p_cptr != NULL) {
		p = current->p_cptr;
		current->p_cptr = p->p_osptr;
		p->p_ysptr = NULL;
		p->flags &= ~(PF_PTRACED|PF_TRACESYS);
		p->p_pptr = p->p_opptr;
		p->p_osptr = p->p_pptr->p_cptr;
		if (p->p_osptr)
			p->p_osptr->p_ysptr = p;
		p->p_pptr->p_cptr = p;
		if (p->state == TASK_ZOMBIE)
			notify_parent(p, p->exit_signal);
		/*
		 * process group orphan check
		 * Case ii: Our child is in a different pgrp
		 * than we are, and it was the only connection
		 * outside, so the child pgrp is now orphaned.
		 */
		if ((p->pgrp != current->pgrp) &&
		    (p->session == current->session)) {
			int pgrp = p->pgrp;

			/* kill_pg may sleep; drop and retake the lock. */
			write_unlock_irq(&tasklist_lock);
			if (is_orphaned_pgrp(pgrp) && has_stopped_jobs(pgrp)) {
				kill_pg(pgrp, SIGHUP, 1);
				kill_pg(pgrp, SIGCONT, 1);
			}
			write_lock_irq(&tasklist_lock);
		}
	}
	write_unlock_irq(&tasklist_lock);

	if (current->leader)
		disassociate_ctty(1);
}
开发者ID:benbee,项目名称:Learning,代码行数:99,
示例5: hp_sdc_put//.........这里部分代码省略......... while (i < 4 && w7[i] == hp_sdc.r7[i]) i++; if (i < 4) { hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i); hp_sdc.wi = 0x70 + i; goto finish; } idx++; if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG) goto actdone; curr->idx = idx; act &= ~HP_SDC_ACT_DATAREG; break; } hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]); hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70]; hp_sdc.wi++; /* write index register autoincrements */ { int i = 0; while ((i < 4) && w7[i] == hp_sdc.r7[i]) i++; if (i >= 4) { curr->idx = idx + 1; if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG) goto actdone; } } goto finish; } /* We don't go any further in the command if there is a pending read, because we don't want interleaved results. */ read_lock_irq(&hp_sdc.rtq_lock); if (hp_sdc.rcurr >= 0) { read_unlock_irq(&hp_sdc.rtq_lock); goto finish; } read_unlock_irq(&hp_sdc.rtq_lock); if (act & HP_SDC_ACT_POSTCMD) { uint8_t postcmd; /* curr->idx should == idx at this point. */ postcmd = curr->seq[idx]; curr->idx++; if (act & HP_SDC_ACT_DATAIN) { /* Start a new read */ hp_sdc.rqty = curr->seq[curr->idx]; do_gettimeofday(&hp_sdc.rtv); curr->idx++; /* Still need to lock here in case of spurious irq. */ write_lock_irq(&hp_sdc.rtq_lock); hp_sdc.rcurr = curridx; write_unlock_irq(&hp_sdc.rtq_lock); hp_sdc_status_out8(postcmd); goto finish; } hp_sdc_status_out8(postcmd); goto actdone; } actdone: if (act & HP_SDC_ACT_SEMAPHORE) up(curr->act.semaphore); else if (act & HP_SDC_ACT_CALLBACK) curr->act.irqhook(0,NULL,0,0); if (curr->idx >= curr->endidx) { /* This transaction is over. */ if (act & HP_SDC_ACT_DEALLOC) kfree(curr); hp_sdc.tq[curridx] = NULL; } else { curr->actidx = idx + 1; curr->idx = idx + 2; } /* Interleave outbound data between the transactions. */ hp_sdc.wcurr++; if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN) hp_sdc.wcurr = 0; finish: /* If by some quirk IBF has cleared and our ISR has run to see that that has happened, do it all again. 
*/ if (!hp_sdc.ibf && limit++ < 20) goto anew; done: if (hp_sdc.wcurr >= 0) tasklet_schedule(&hp_sdc.task); write_unlock(&hp_sdc.lock); return 0;}
开发者ID:125radheyshyam,项目名称:linux,代码行数:101,
示例6: ERR_PTR//.........这里部分代码省略......... /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; /* * Syscall tracing should be turned off in the child regardless * of CLONE_PTRACE. */ clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); /* Our parent execution domain becomes current domain These must match for thread signalling to apply */ p->parent_exec_id = p->self_exec_id; /* ok, now we should be set up.. */ p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); p->pdeath_signal = 0; /* Perform scheduler related setup */ sched_fork(p); /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. */ p->tgid = p->pid; p->group_leader = p; INIT_LIST_HEAD(&p->ptrace_children); INIT_LIST_HEAD(&p->ptrace_list); /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* * Check for pending SIGKILL! The new thread should not be allowed * to slip out of an OOM kill. (or normal SIGKILL.) */ if (sigismember(¤t->pending.signal, SIGKILL)) { write_unlock_irq(&tasklist_lock); retval = -EINTR; goto bad_fork_cleanup_namespace; } /* CLONE_PARENT re-uses the old parent */ if (clone_flags & CLONE_PARENT) p->real_parent = current->real_parent; else p->real_parent = current; p->parent = p->real_parent; if (clone_flags & CLONE_THREAD) { spin_lock(¤t->sighand->siglock); /* * Important: if an exit-all has been started then * do not create this new thread - the whole thread * group is supposed to exit anyway. */ if (current->signal->group_exit) { spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -EAGAIN; goto bad_fork_cleanup_namespace; } p->tgid = current->tgid; p->group_leader = current->group_leader;
开发者ID:iPodLinux,项目名称:linux-2.6.7-ipod,代码行数:67,
示例7: vcc_remove_socket
/* Unhash an ATM VCC socket under the global socket-list write lock. */
static void vcc_remove_socket(struct sock *sk)
{
	write_lock_irq(&vcc_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_irq(&vcc_sklist_lock);
}
开发者ID:3sOx,项目名称:asuswrt-merlin,代码行数:6,
示例8: ERR_PTR//.........这里部分代码省略.........#endif if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) p->sas_ss_sp = p->sas_ss_size = 0; user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);#ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);#endif clear_all_latency_tracing(p); if (clone_flags & CLONE_THREAD) p->exit_signal = -1; else if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); cgroup_fork_callbacks(p); cgroup_callbacks_done = 1; write_lock_irq(&tasklist_lock); if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(¤t->sighand->siglock); recalc_sigpending(); if (signal_pending(current)) { spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_free_pid; } if (clone_flags & CLONE_THREAD) { current->signal->nr_threads++; atomic_inc(¤t->signal->live); atomic_inc(¤t->signal->sigcnt); p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); } if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); if (thread_group_leader(p)) {
开发者ID:MiniBlu,项目名称:cm11_kernel_htc_msm8974a3ul,代码行数:67,
示例9: sys_wait4
/*
 * wait4() system call: reap a stopped or zombie child matching @pid,
 * optionally returning its status and resource usage to user space.
 */
asmlinkage long sys_wait4(pid_t pid, unsigned int *stat_addr,
			  int options, struct rusage *ru)
{
	int flag, retval;
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;

	if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	add_wait_queue(&current->wait_chldexit, &wait);
repeat:
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;

		for (p = tsk->p_cptr; p; p = p->p_osptr) {
			/* pid > 0: exact pid; pid == 0: our pgrp;
			 * pid < -1: pgrp -pid; pid == -1: any child. */
			if (pid > 0) {
				if (p->pid != pid)
					continue;
			} else if (!pid) {
				if (p->pgrp != current->pgrp)
					continue;
			} else if (pid != -1) {
				if (p->pgrp != -pid)
					continue;
			}
			/* Wait for all children (clone and not) if __WALL is set;
			 * otherwise, wait for clone children *only* if __WCLONE is
			 * set; otherwise, wait for non-clone children *only*.  (Note:
			 * A "clone" child here is one that reports to its parent
			 * using a signal other than SIGCHLD.) */
			if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
			    && !(options & __WALL))
				continue;
			flag = 1;
			switch (p->state) {
			case TASK_STOPPED:
				if (!p->exit_code)
					continue;
				if (!(options & WUNTRACED) &&
				    !(p->ptrace & PT_PTRACED))
					continue;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
				if (!retval && stat_addr)
					retval = put_user((p->exit_code << 8) | 0x7f,
							  stat_addr);
				if (!retval) {
					p->exit_code = 0;
					retval = p->pid;
				}
				goto end_wait4;
			case TASK_ZOMBIE:
				current->times.tms_cutime +=
					p->times.tms_utime + p->times.tms_cutime;
				current->times.tms_cstime +=
					p->times.tms_stime + p->times.tms_cstime;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
				if (!retval && stat_addr)
					retval = put_user(p->exit_code, stat_addr);
				if (retval)
					goto end_wait4;
				retval = p->pid;
				if (p->p_opptr != p->p_pptr) {
					/* Ptraced child: hand it back to its
					 * original parent instead of releasing. */
					write_lock_irq(&tasklist_lock);
					REMOVE_LINKS(p);
					p->p_pptr = p->p_opptr;
					SET_LINKS(p);
					do_notify_parent(p, SIGCHLD);
					write_unlock_irq(&tasklist_lock);
				} else
					release_task(p);
				goto end_wait4;
			default:
				continue;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
	} while (tsk != current);
	read_unlock(&tasklist_lock);

	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end_wait4;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end_wait4;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end_wait4:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->wait_chldexit, &wait);
	return retval;
}
开发者ID:nhanh0,项目名称:hah,代码行数:97,
示例10: ib_cache_updatestatic void ib_cache_update(struct ib_device *device, u8 port){ struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; struct ib_gid_cache *gid_cache = NULL, *old_gid_cache; int i, j; int ret; union ib_gid gid, empty_gid; u16 pkey; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) return; ret = ib_query_port(device, port, tprops); if (ret) { printk(KERN_WARNING "ib_query_port failed (%d) for %s/n", ret, device->name); goto err; } pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * sizeof *pkey_cache->entry, GFP_KERNEL); if (!pkey_cache) goto err; pkey_cache->table_len = 0; gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len * sizeof *gid_cache->entry, GFP_KERNEL); if (!gid_cache) goto err; gid_cache->table_len = 0; for (i = 0, j = 0; i < tprops->pkey_tbl_len; ++i) { ret = ib_query_pkey(device, port, i, &pkey); if (ret) { printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)/n", ret, device->name, i); goto err; } /* pkey 0xffff must be the default pkeyand 0x0000 must be the invalid * pkey per IBTA spec */ if (pkey) { pkey_cache->entry[j].index = i; pkey_cache->entry[j++].pkey = pkey; } } pkey_cache->table_len = j; memset(&empty_gid, 0, sizeof empty_gid); for (i = 0, j = 0; i < tprops->gid_tbl_len; ++i) { ret = ib_query_gid(device, port, i, &gid); if (ret) { printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)/n", ret, device->name, i); goto err; } /* if the lower 8 bytes the device GID entry is all 0, * our entry is a blank, invalid entry... 
* depending on device, the upper 8 bytes might or might * not be prefilled with a valid subnet prefix, so * don't rely on them for determining a valid gid * entry */ if (memcmp(&gid + 8, &empty_gid + 8, sizeof gid - 8)) { gid_cache->entry[j].index = i; gid_cache->entry[j++].gid = gid; } } gid_cache->table_len = j; old_pkey_cache = pkey_cache; pkey_cache = kmalloc(sizeof *pkey_cache + old_pkey_cache->table_len * sizeof *pkey_cache->entry, GFP_KERNEL); if (!pkey_cache) pkey_cache = old_pkey_cache; else { pkey_cache->table_len = old_pkey_cache->table_len; memcpy(&pkey_cache->entry[0], &old_pkey_cache->entry[0], pkey_cache->table_len * sizeof *pkey_cache->entry); kfree(old_pkey_cache); } old_gid_cache = gid_cache; gid_cache = kmalloc(sizeof *gid_cache + old_gid_cache->table_len * sizeof *gid_cache->entry, GFP_KERNEL); if (!gid_cache) gid_cache = old_gid_cache; else { gid_cache->table_len = old_gid_cache->table_len; memcpy(&gid_cache->entry[0], &old_gid_cache->entry[0], gid_cache->table_len * sizeof *gid_cache->entry); kfree(old_gid_cache); } write_lock_irq(&device->cache.lock);//.........这里部分代码省略.........
开发者ID:3null,项目名称:fastsocket,代码行数:101,
示例11: warp_clock
/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours)  or
 * compile in the timezone information into the kernel.  Bad, bad....
 *
 *						- TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it.  This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
inline static void warp_clock(void)
{
	write_lock_irq(&xtime_lock);
	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
	write_unlock_irq(&xtime_lock);
}
开发者ID:FoXPeeD,项目名称:OS-bwis,代码行数:22,
示例12: ERR_PTR//.........这里部分代码省略......... /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; /* * Syscall tracing should be turned off in the child regardless * of CLONE_PTRACE. */ clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); /* Our parent execution domain becomes current domain These must match for thread signalling to apply */ p->parent_exec_id = p->self_exec_id; /* ok, now we should be set up.. */ p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; /* Perform scheduler related setup */ sched_fork(p); /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. */ p->group_leader = p; INIT_LIST_HEAD(&p->ptrace_children); INIT_LIST_HEAD(&p->ptrace_list); /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* * The task hasn't been attached yet, so cpus_allowed mask cannot * have changed. The cpus_allowed mask of the parent may have * changed after it was copied first time, and it may then move to * another CPU - so we re-copy it here and set the child's CPU to * the parent's CPU. This avoids alot of nasty races. */ p->cpus_allowed = current->cpus_allowed; set_task_cpu(p, smp_processor_id()); /* * Check for pending SIGKILL! The new thread should not be allowed * to slip out of an OOM kill. (or normal SIGKILL.) */ if (sigismember(¤t->pending.signal, SIGKILL)) { write_unlock_irq(&tasklist_lock); retval = -EINTR; goto bad_fork_cleanup_namespace; } /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) p->real_parent = current->real_parent; else p->real_parent = current; p->parent = p->real_parent; if (clone_flags & CLONE_THREAD) { spin_lock(¤t->sighand->siglock); /* * Important: if an exit-all has been started then
开发者ID:Pating,项目名称:linux-2.6.12-rc2,代码行数:67,
示例13: sys_ptracelong sys_ptrace(long request, pid_t pid, long addr, long data){ struct task_struct *child; long ret; lock_kernel(); ret = -EPERM; if (request == PTRACE_TRACEME) { /* are we already being traced? */ if (current->ptrace & PT_PTRACED) goto out; /* set the ptrace bit in the process flags. */ current->ptrace |= PT_PTRACED; ret = 0; goto out; } ret = -ESRCH; read_lock(&tasklist_lock); child = find_task_by_pid(pid); if (child) get_task_struct(child); read_unlock(&tasklist_lock); if (!child) goto out; ret = -EPERM; if (pid == 1) /* no messing around with init! */ goto out_tsk; if (request == PTRACE_ATTACH) { if (child == current) goto out_tsk; if ((!child->dumpable || (current->uid != child->euid) || (current->uid != child->suid) || (current->uid != child->uid) || (current->gid != child->egid) || (current->gid != child->sgid) || (!cap_issubset(child->cap_permitted, current->cap_permitted)) || (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE)) goto out_tsk; /* the same process cannot be attached many times */ if (child->ptrace & PT_PTRACED) goto out_tsk; child->ptrace |= PT_PTRACED; if (child->p_pptr != current) { unsigned long flags; write_lock_irqsave(&tasklist_lock, flags); REMOVE_LINKS(child); child->p_pptr = current; SET_LINKS(child); write_unlock_irqrestore(&tasklist_lock, flags); } send_sig(SIGSTOP, child, 1); ret = 0; goto out_tsk; } ret = -ESRCH; if (!(child->ptrace & PT_PTRACED)) goto out_tsk; if (child->state != TASK_STOPPED) { if (request != PTRACE_KILL) goto out_tsk; } if (child->p_pptr != current) goto out_tsk; switch (request) { case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { unsigned long tmp; int copied; copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); ret = -EIO; if (copied != sizeof(tmp)) goto out_tsk; ret = put_user(tmp,(unsigned long *) data); goto out_tsk; } /* when I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. 
*/ case PTRACE_POKEDATA: ret = 0; if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) goto out_tsk; ret = -EIO; goto out_tsk; /* Read the word at location addr in the USER area. This will need to change when the kernel no longer saves all regs on a syscall. */ case PTRACE_PEEKUSR: { unsigned long tmp; ret = -EIO; if ((addr & 3) || (unsigned long) addr >= sizeof(struct pt_regs)) goto out_tsk;//.........这里部分代码省略.........
开发者ID:dmgerman,项目名称:linux-pre-history,代码行数:101,
示例14: zfcp_adapter_enqueue
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct cc_device
 *
 * Returns: 0 if a new adapter was successfully enqueued
 *          -ENOMEM if alloc failed
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 * locks: config_sema must be held to serialise changes to the adapter list
 */
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	/*
	 * Note: It is safe to release the list_lock, as any list changes
	 * are protected by the config_sema, which must be held to get here
	 */
	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;
	atomic_set(&adapter->refcount, 0);

	if (zfcp_qdio_allocate(adapter))
		goto qdio_allocate_failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_reqlist_alloc(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_adapter_debug_register(adapter))
		goto debug_register_failed;

	init_waitqueue_head(&adapter->remove_wq);
	init_waitqueue_head(&adapter->erp_thread_wqh);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->port_list_head);
	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	spin_lock_init(&adapter->req_list_lock);
	spin_lock_init(&adapter->hba_dbf_lock);
	spin_lock_init(&adapter->san_dbf_lock);
	spin_lock_init(&adapter->scsi_dbf_lock);
	spin_lock_init(&adapter->rec_dbf_lock);
	spin_lock_init(&adapter->req_q_lock);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	sema_init(&adapter->erp_ready_sem, 0);

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);

	/* mark adapter unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto sysfs_failed;

	write_lock_irq(&zfcp_data.config_lock);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
	list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_fc_nameserver_init(adapter);

	return 0;

sysfs_failed:
	zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
	dev_set_drvdata(&ccw_device->dev, NULL);
	kfree(adapter->req_list);
failed_low_mem_buffers:
	zfcp_free_low_mem_buffers(adapter);
qdio_allocate_failed:
	zfcp_qdio_free(adapter);
	kfree(adapter);
	return -ENOMEM;
}
开发者ID:kizukukoto,项目名称:WDN900_GPL,代码行数:94,
示例15: kref_get
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				    u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	/* Pin the adapter for the lifetime of this port. */
	kref_get(&adapter->ref);

	/* Refuse to enqueue the same WWPN twice. */
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		put_device(&port->dev);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->dev.parent = &adapter->ccw_device->dev;
	port->dev.release = zfcp_port_release;

	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	if (device_register(&port->dev)) {
		put_device(&port->dev);
		goto err_out;
	}

	if (sysfs_create_group(&port->dev.kobj,
			       &zfcp_sysfs_port_attrs))
		goto err_out_put;

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out_put:
	device_unregister(&port->dev);
err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}
开发者ID:A2109devs,项目名称:lenovo_a2109a_kernel,代码行数:74,
示例16: ib_cache_update
/*
 * Rebuild the cached P_Key table, GID table and LMC for one port by
 * querying the device, then atomically swap the new tables in under
 * the device cache lock.  Allocation or query failure leaves the old
 * cache untouched.
 * (scrape artifact fixed: "/n" restored to "\n" in the printk strings.)
 */
static void ib_cache_update(struct ib_device *device, u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
	int i;
	int ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
开发者ID:chunyenho,项目名称:RTS-hw2,代码行数:74,
示例17: write_one_page
/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	/* single-page, synchronous writeback request */
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	/* let any writeback already in flight finish before ours starts */
	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		/* extra ref keeps the page alive across ->writepage, which
		 * unlocks the page */
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		/* page was not dirty: nothing submitted, unlock it ourselves */
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	/* always 0: the page was not redirtied for writeback accounting */
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		write_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0//.........这里部分代码省略.........
开发者ID:cilynx,项目名称:dd-wrt,代码行数:101,
示例18: sys_setpgid
/*
 * setpgid(2): move process @pid into process group @pgid.
 *
 * pid == 0 means the calling process; pgid == 0 means "use pid as the
 * group id".  Returns 0 on success or a negative errno.  The whole
 * operation runs under the tasklist write lock so the parent/child
 * relationships checked below cannot change concurrently.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	int err = -EINVAL;

	if (!pid)
		pid = current->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	/* only a thread group leader may be moved to another group */
	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->parent == current || p->real_parent == current) {
		/* moving a child: must share our session and not have exec'd */
		err = -EPERM;
		if (p->signal->session != current->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		/* otherwise a process may only move itself */
		err = -ESRCH;
		if (p != current)
			goto out;
	}

	/* session leaders may not change their group */
	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		/* joining an existing group: it must have a member in our
		 * session.  NOTE(review): this inner 'p' deliberately (?)
		 * shadows the outer 'p' used after ok_pgid — confirm against
		 * the upstream version before touching this. */
		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == current->signal->session)
				goto ok_pgid;
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		/* re-hash the task under its new process group id */
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
开发者ID:Antonio-Zhou,项目名称:Linux-2.6.11,代码行数:70,
示例19: vcc_insert_socket
/*
 * Publish an ATM socket on the global VCC socket list.  The actual list
 * insertion is done by __vcc_insert_socket(); this wrapper only supplies
 * the locking: the IRQ-disabling writer side of vcc_sklist_lock.
 */
void vcc_insert_socket(struct sock *sk)
{
	write_lock_irq(&vcc_sklist_lock);
	__vcc_insert_socket(sk);
	write_unlock_irq(&vcc_sklist_lock);
}
开发者ID:3sOx,项目名称:asuswrt-merlin,代码行数:6,
示例20: kzalloc
/**
 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
 * @port: pointer to port where unit is added
 * @fcp_lun: FCP LUN of unit to be enqueued
 * Returns: pointer to enqueued unit on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the unit list
 *
 * Sets up some unit internal structures and creates sysfs entry.
 */
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	atomic_set(&unit->refcount, 0);
	init_waitqueue_head(&unit->remove_wq);

	unit->port = port;
	unit->fcp_lun = fcp_lun;

	dev_set_name(&unit->sysfs_device, "0x%016llx",
		     (unsigned long long) fcp_lun);
	unit->sysfs_device.parent = &port->sysfs_device;
	unit->sysfs_device.release = zfcp_sysfs_unit_release;
	dev_set_drvdata(&unit->sysfs_device, unit);

	/* mark unit unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);

	/* latency minima start at the maximum so the first sample wins */
	spin_lock_init(&unit->latencies.lock);
	unit->latencies.write.channel.min = 0xFFFFFFFF;
	unit->latencies.write.fabric.min = 0xFFFFFFFF;
	unit->latencies.read.channel.min = 0xFFFFFFFF;
	unit->latencies.read.fabric.min = 0xFFFFFFFF;
	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;

	/* duplicate-LUN check under the config read lock */
	read_lock_irq(&zfcp_data.config_lock);
	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
		read_unlock_irq(&zfcp_data.config_lock);
		goto err_out_free;
	}
	read_unlock_irq(&zfcp_data.config_lock);

	/* NOTE(review): after a failed device_register() the driver-model
	 * convention is put_device(), not kfree() — confirm whether the
	 * err_out_free path here is safe in this kernel version. */
	if (device_register(&unit->sysfs_device))
		goto err_out_free;

	if (sysfs_create_group(&unit->sysfs_device.kobj,
			       &zfcp_sysfs_unit_attrs)) {
		device_unregister(&unit->sysfs_device);
		return ERR_PTR(-EIO);
	}

	zfcp_unit_get(unit);

	/* publish the unit and flip REMOVE -> RUNNING atomically w.r.t.
	 * other config_lock holders */
	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&unit->list, &port->unit_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
	write_unlock_irq(&zfcp_data.config_lock);

	/* the unit holds a reference on its port */
	zfcp_port_get(port);

	return unit;

err_out_free:
	kfree(unit);
	return ERR_PTR(-EINVAL);
}
开发者ID:458941968,项目名称:mini2440-kernel-2.6.29,代码行数:73,
示例21: sys_wait4
/*
 * wait4(2): wait for a child matching @pid to stop or exit.
 *
 * pid > 0  : that specific child;  pid == 0 : any child in our pgrp;
 * pid < -1 : any child in pgrp -pid;  pid == -1 : any child.
 * Blocks unless WNOHANG is set; returns the reaped child's pid, 0
 * (WNOHANG, nothing ready), or a negative errno.
 *
 * (Extraction note: "&current" had been mangled to "¤t" in the two
 * wait-queue calls below; restored.)
 */
asmlinkage int sys_wait4(pid_t pid, unsigned int * stat_addr, int options,
			 struct rusage * ru)
{
	int flag, retval;
	struct wait_queue wait = { current, NULL };
	struct task_struct *p;

	if (options & ~(WNOHANG|WUNTRACED|__WCLONE))
		return -EINVAL;

	add_wait_queue(&current->wait_chldexit, &wait);
repeat:
	flag = 0;
	/* The interruptible state must be set before looking at the
	   children.  This because we want to catch any racy exit from the
	   children as do_exit() may run under us.  The following read_lock
	   will enforce SMP ordering at the CPU level. */
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	for (p = current->p_cptr ; p ; p = p->p_osptr) {
		/* does this child match the pid selector? */
		if (pid > 0) {
			if (p->pid != pid)
				continue;
		} else if (!pid) {
			if (p->pgrp != current->pgrp)
				continue;
		} else if (pid != -1) {
			if (p->pgrp != -pid)
				continue;
		}
		/* wait for cloned processes iff the __WCLONE flag is set */
		if ((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
			continue;
		flag = 1;	/* at least one eligible child exists */
		switch (p->state) {
		case TASK_STOPPED:
			if (!p->exit_code)
				continue;
			if (!(options & WUNTRACED) && !(p->flags & PF_PTRACED))
				continue;
			read_unlock(&tasklist_lock);
			current->state = TASK_RUNNING;
			/* We *must* do this before touching userspace! */
			retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
			if (!retval && stat_addr)
				retval = put_user((p->exit_code << 8) | 0x7f,
						  stat_addr);
			if (!retval) {
				p->exit_code = 0;
				retval = p->pid;
			}
			goto end_wait4;
		case TASK_ZOMBIE:
			/* fold the dead child's CPU times into ours */
			current->times.tms_cutime += p->times.tms_utime +
						     p->times.tms_cutime;
			current->times.tms_cstime += p->times.tms_stime +
						     p->times.tms_cstime;
			read_unlock(&tasklist_lock);
			current->state = TASK_RUNNING;
			/* We *must* do this before touching userspace! */
			retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
			if (!retval && stat_addr)
				retval = put_user(p->exit_code, stat_addr);
			if (retval)
				goto end_wait4;
			retval = p->pid;
			if (p->p_opptr != p->p_pptr) {
				/* traced child: hand it back to its original
				 * parent instead of releasing it */
				write_lock_irq(&tasklist_lock);
				REMOVE_LINKS(p);
				p->p_pptr = p->p_opptr;
				SET_LINKS(p);
				write_unlock_irq(&tasklist_lock);
				notify_parent(p, SIGCHLD);
			} else
				release(p);
#ifdef DEBUG_PROC_TREE
			audit_ptree();
#endif
			goto end_wait4;
		default:
			continue;
		}
	}
	read_unlock(&tasklist_lock);
	if (flag) {
		/* eligible children exist but none is ready yet */
		retval = 0;
		if (options & WNOHANG)
			goto end_wait4;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end_wait4;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end_wait4:
	remove_wait_queue(&current->wait_chldexit, &wait);
	current->state = TASK_RUNNING;
	return retval;
}
开发者ID:benbee,项目名称:Learning,代码行数:96,
示例22: do_adjtimex/* adjtimex mainly allows reading (and writing, if superuser) of * kernel time-keeping variables. used by xntpd. */int do_adjtimex(struct timex *txc){ long ltemp, mtemp, save_adjust; int result; /* In order to modify anything, you gotta be super-user! */ if (txc->modes && !capable(CAP_SYS_TIME)) return -EPERM; /* Now we validate the data before disabling interrupts */ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) /* singleshot must not be used with any other mode bits */ if (txc->modes != ADJ_OFFSET_SINGLESHOT) return -EINVAL; if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET)) /* adjustment Offset limited to +- .512 seconds */ if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE ) return -EINVAL; /* if the quartz is off by more than 10% something is VERY wrong ! */ if (txc->modes & ADJ_TICK) if (txc->tick < 900000/HZ || txc->tick > 1100000/HZ) return -EINVAL; write_lock_irq(&xtime_lock); result = time_state; /* mostly `TIME_OK' */ /* Save for later - semantics of adjtime is to return old value */ save_adjust = time_adjust;#if 0 /* STA_CLOCKERR is never set yet */ time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */#endif /* If there are input parameters, then process them */ if (txc->modes) { if (txc->modes & ADJ_STATUS) /* only set allowed bits */ time_status = (txc->status & ~STA_RONLY) | (time_status & STA_RONLY); if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */ if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) { result = -EINVAL; goto leave; } time_freq = txc->freq - pps_freq; } if (txc->modes & ADJ_MAXERROR) { if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) { result = -EINVAL; goto leave; } time_maxerror = txc->maxerror; } if (txc->modes & ADJ_ESTERROR) { if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) { result = -EINVAL; goto leave; } time_esterror = txc->esterror; } if (txc->modes & ADJ_TIMECONST) { /* p. 
24 */ if (txc->constant < 0) { /* NTP v4 uses values > 6 */ result = -EINVAL; goto leave; } time_constant = txc->constant; } if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ if (txc->modes == ADJ_OFFSET_SINGLESHOT) { /* adjtime() is independent from ntp_adjtime() */ time_adjust = txc->offset; } else if ( time_status & (STA_PLL | STA_PPSTIME) ) { ltemp = (time_status & (STA_PPSTIME | STA_PPSSIGNAL)) == (STA_PPSTIME | STA_PPSSIGNAL) ? pps_offset : txc->offset; /* * Scale the phase adjustment and * clamp to the operating range. */ if (ltemp > MAXPHASE) time_offset = MAXPHASE << SHIFT_UPDATE; else if (ltemp < -MAXPHASE) time_offset = -(MAXPHASE << SHIFT_UPDATE); else time_offset = ltemp << SHIFT_UPDATE; /* * Select whether the frequency is to be controlled//.........这里部分代码省略.........
开发者ID:muromec,项目名称:linux-ezxdev,代码行数:101,
示例23: ERR_PTR//.........这里部分代码省略......... * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);#ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);#endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ if (clone_flags & CLONE_THREAD) p->exit_signal = -1; else if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. */ p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(¤t->sighand->siglock); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * it's process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). */ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_free_pid; } if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); if (thread_group_leader(p)) {
开发者ID:ChenHongxing,项目名称:android_kernel_ZTE_NX505J,代码行数:67,
注:本文中的write_lock_irq函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 C++ write_lock_irqsave函数代码示例 C++ write_lock_bh函数代码示例 |