
Tutorial: C++ sock_put Function Code Examples

51自学网 2021-06-03 08:05:51
This tutorial, "C++ sock_put Function Code Examples", is quite practical; we hope it helps you.

This article collects typical usage examples of the sock_put function in C++. If you are unsure what exactly sock_put does, how to call it, or what real uses of it look like, the hand-picked examples below should help. All of them are taken from Linux kernel (and kernel-derived) networking code, where sock_put() drops a reference on a struct sock.

The following shows 28 code examples of the sock_put function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
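All of these snippets follow the same reference-counting contract: sock_hold() (or atomic_inc_not_zero() on sk->sk_refcnt) takes a reference on a struct sock, and sock_put() drops one, freeing the socket once the count reaches zero. As a rough illustration of that contract only (not kernel code), here is a minimal userspace C sketch; my_sock, my_sock_hold and my_sock_put are made-up names that merely mimic the pattern.

/*
 * Userspace sketch, not kernel code: my_sock, my_sock_hold and my_sock_put
 * are invented names that imitate the sock_hold()/sock_put() contract seen
 * in the examples below. The last put frees the object.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct my_sock {
    atomic_int refcnt;   /* plays the role of sk->sk_refcnt */
    int        port;     /* stand-in for real socket state  */
};

static struct my_sock *my_sock_alloc(int port)
{
    struct my_sock *sk = malloc(sizeof(*sk));
    if (!sk)
        return NULL;
    atomic_init(&sk->refcnt, 1);   /* creator owns the first reference */
    sk->port = port;
    return sk;
}

static void my_sock_hold(struct my_sock *sk)   /* like sock_hold() */
{
    atomic_fetch_add(&sk->refcnt, 1);
}

static void my_sock_put(struct my_sock *sk)    /* like sock_put() */
{
    /* atomic_fetch_sub() returns the old value; 1 means we dropped to 0 */
    if (atomic_fetch_sub(&sk->refcnt, 1) == 1) {
        printf("last reference dropped, freeing port %d\n", sk->port);
        free(sk);   /* the kernel would call sk_free() here */
    }
}

int main(void)
{
    struct my_sock *sk = my_sock_alloc(4000);

    my_sock_hold(sk);  /* e.g. a lookup handing out a second reference */
    my_sock_put(sk);   /* that user is done with it                    */
    my_sock_put(sk);   /* creator drops the last reference: freed      */
    return 0;
}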

Example 1: mptp_release

static int mptp_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct mptp_sock *ssk = mptp_sk(sk);

    if (unlikely(!sk))
        return 0;

    mptp_unhash(ssk->src);
    sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
    synchronize_net();

    sock_orphan(sk);
    sock->sk = NULL;

    skb_queue_purge(&sk->sk_receive_queue);

    log_debug("mptp_release sock=%p\n", sk);
    sock_put(sk);

    return 0;
}
Developer ID: paulvlase, Project: mptp, Lines of code: 24


Example 2: pep_sock_unhash

static void pep_sock_unhash(struct sock *sk)
{
    struct pep_sock *pn = pep_sk(sk);
    struct sock *skparent = NULL;

    lock_sock(sk);

    if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
        skparent = pn->listener;
        sk_del_node_init(sk);
        release_sock(sk);

        sk = skparent;
        pn = pep_sk(skparent);
        lock_sock(sk);
    }

    /* Unhash a listening sock only when it is closed
     * and all of its active connected pipes are closed. */
    if (hlist_empty(&pn->hlist))
        pn_sock_unhash(&pn->pn_sk.sk);

    release_sock(sk);

    if (skparent)
        sock_put(skparent);
}
Developer ID: 325116067, Project: semc-qsd8x50, Lines of code: 24


Example 3: netlink_release

static int netlink_release(struct socket *sock)
{
    struct sock *sk = sock->sk;

    if (!sk)
        return 0;

    netlink_remove(sk);

    spin_lock(&sk->protinfo.af_netlink->cb_lock);
    if (sk->protinfo.af_netlink->cb) {
        sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
        netlink_destroy_callback(sk->protinfo.af_netlink->cb);
        sk->protinfo.af_netlink->cb = NULL;
        __sock_put(sk);
    }
    spin_unlock(&sk->protinfo.af_netlink->cb_lock);

    /* OK. Socket is unlinked, and, therefore,
       no new packets will arrive */

    sock_orphan(sk);
    sock->sk = NULL;
    wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

    skb_queue_purge(&sk->write_queue);

    if (sk->protinfo.af_netlink->pid && !sk->protinfo.af_netlink->groups) {
        struct netlink_notify n = { protocol:sk->protocol,
                                    pid:sk->protinfo.af_netlink->pid };
        notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
    }

    sock_put(sk);
    return 0;
}
Developer ID: muromec, Project: linux-ezxdev, Lines of code: 36


Example 4: llc_sk_laddr_hashfn

/**
 *	__llc_lookup_established - Finds connection for the remote/local sap/mac
 *	@sap: SAP
 *	@daddr: address of remote LLC (MAC + SAP)
 *	@laddr: address of local LLC (MAC + SAP)
 *
 *	Search connection list of the SAP and finds connection using the remote
 *	mac, remote sap, local mac, and local sap. Returns pointer for
 *	connection found, %NULL otherwise.
 *	Caller has to make sure local_bh is disabled.
 */
static struct sock *__llc_lookup_established(struct llc_sap *sap,
                                             struct llc_addr *daddr,
                                             struct llc_addr *laddr)
{
    struct sock *rc;
    struct hlist_nulls_node *node;
    int slot = llc_sk_laddr_hashfn(sap, laddr);
    struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];

    rcu_read_lock();
again:
    sk_nulls_for_each_rcu(rc, node, laddr_hb) {
        if (llc_estab_match(sap, daddr, laddr, rc)) {
            /* Extra checks required by SLAB_DESTROY_BY_RCU */
            if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                goto again;
            if (unlikely(llc_sk(rc)->sap != sap ||
                         !llc_estab_match(sap, daddr, laddr, rc))) {
                sock_put(rc);
                continue;
            }
            goto found;
        }
    }
    rc = NULL;
    /*
     * if the nulls value we got at the end of this lookup is
     * not the expected one, we must restart lookup.
     * We probably met an item that was moved to another chain.
     */
    if (unlikely(get_nulls_value(node) != slot))
        goto again;
found:
    rcu_read_unlock();
    return rc;
}
Developer ID: nos1609, Project: Chrono_Kernel-1, Lines of code: 47
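The lookup above takes its reference with atomic_inc_not_zero() rather than a plain increment: under SLAB_DESTROY_BY_RCU the socket may already be on its way to being freed and reused, so a reference must not be taken once the count has hit zero, and the match has to be re-checked afterwards (with sock_put() undoing the reference if the re-check fails). Continuing the userspace sketch from the introduction (same made-up my_sock type, <stdatomic.h>), a hypothetical "take a reference unless it is already zero" helper could look like this:

/* Hypothetical helper built on the struct my_sock sketch above
 * (requires <stdatomic.h>); it mirrors what
 * atomic_inc_not_zero(&sk->sk_refcnt) does in the kernel lookup. */
static int my_sock_hold_unless_zero(struct my_sock *sk)
{
    int old = atomic_load(&sk->refcnt);

    while (old != 0) {
        /* CAS loop: bump the count only if it is still non-zero */
        if (atomic_compare_exchange_weak(&sk->refcnt, &old, old + 1))
            return 1;   /* got a reference, safe to use sk */
    }
    return 0;           /* already dying, caller must not touch sk */
}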


Example 5: llc_ui_release

/**
 *	llc_ui_release - shutdown socket
 *	@sock: Socket to release.
 *
 *	Shutdown and deallocate an existing socket.
 */
static int llc_ui_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct llc_sock *llc;

    if (unlikely(sk == NULL))
        goto out;
    sock_hold(sk);
    lock_sock(sk);
    llc = llc_sk(sk);
    dprintk("%s: closing local(%02X) remote(%02X)\n", __func__,
            llc->laddr.lsap, llc->daddr.lsap);
    if (!llc_send_disc(sk))
        llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
    if (!sock_flag(sk, SOCK_ZAPPED))
        llc_sap_remove_socket(llc->sap, sk);
    release_sock(sk);
    if (llc->dev)
        dev_put(llc->dev);
    sock_put(sk);
    llc_sk_free(sk);
out:
    return 0;
}
Developer ID: Core2idiot, Project: Kernel-Samsung-3.0...-, Lines of code: 30


Example 6: skb_clone_tx_timestamp

void skb_clone_tx_timestamp(struct sk_buff *skb)
{
    struct phy_device *phydev;
    struct sk_buff *clone;
    struct sock *sk = skb->sk;
    unsigned int type;

    if (!sk)
        return;

    type = classify(skb);

    switch (type) {
    case PTP_CLASS_V1_IPV4:
    case PTP_CLASS_V1_IPV6:
    case PTP_CLASS_V2_IPV4:
    case PTP_CLASS_V2_IPV6:
    case PTP_CLASS_V2_L2:
    case PTP_CLASS_V2_VLAN:
        phydev = skb->dev->phydev;
        if (likely(phydev->drv->txtstamp)) {
            if (!atomic_inc_not_zero(&sk->sk_refcnt))
                return;
            clone = skb_clone(skb, GFP_ATOMIC);
            if (!clone) {
                sock_put(sk);
                return;
            }
            clone->sk = sk;
            phydev->drv->txtstamp(phydev, clone, type);
        }
        break;
    default:
        break;
    }
}
Developer ID: 1yankeedt, Project: D710BST_FL24_Kernel, Lines of code: 36


Example 7: dccp_close

void dccp_close(struct sock *sk, long timeout){	struct sk_buff *skb;	lock_sock(sk);	sk->sk_shutdown = SHUTDOWN_MASK;	if (sk->sk_state == DCCP_LISTEN) {		dccp_set_state(sk, DCCP_CLOSED);		/* Special case. */		inet_csk_listen_stop(sk);		goto adjudge_to_death;	}	/*	 * We need to flush the recv. buffs.  We do this only on the	 * descriptor close, not protocol-sourced closes, because the	  *reader process may not have drained the data yet!	 */	/* FIXME: check for unread data */	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {		__kfree_skb(skb);	}	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {		/* Check zero linger _after_ checking for unread data. */		sk->sk_prot->disconnect(sk, 0);	} else if (dccp_close_state(sk)) {		dccp_send_close(sk, 1);	}	sk_stream_wait_close(sk, timeout);adjudge_to_death:	/*	 * It is the last release_sock in its life. It will remove backlog.	 */	release_sock(sk);	/*	 * Now socket is owned by kernel and we acquire BH lock	 * to finish close. No need to check for user refs.	 */	local_bh_disable();	bh_lock_sock(sk);	BUG_TRAP(!sock_owned_by_user(sk));	sock_hold(sk);	sock_orphan(sk);	/*	 * The last release_sock may have processed the CLOSE or RESET	 * packet moving sock to CLOSED state, if not we have to fire	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"	 * in draft-ietf-dccp-spec-11. -acme	 */	if (sk->sk_state == DCCP_CLOSING) {		/* FIXME: should start at 2 * RTT */		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,					  inet_csk(sk)->icsk_rto,					  DCCP_RTO_MAX);#if 0		/* Yeah, we should use sk->sk_prot->orphan_count, etc */		dccp_set_state(sk, DCCP_CLOSED);#endif	}	atomic_inc(sk->sk_prot->orphan_count);	if (sk->sk_state == DCCP_CLOSED)		inet_csk_destroy_sock(sk);	/* Otherwise, socket is reprieved until protocol close. */	bh_unlock_sock(sk);	local_bh_enable();	sock_put(sk);}
Developer ID: BackupTheBerlios, Project: tew632-brp-svn, Lines of code: 80


Example 8: release

static int release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct tipc_port *tport;
    struct sk_buff *buf;
    int res;

    /*
     * Exit if socket isn't fully initialized (occurs when a failed accept()
     * releases a pre-allocated child socket that was never used)
     */
    if (sk == NULL)
        return 0;

    tport = tipc_sk_port(sk);
    lock_sock(sk);

    /*
     * Reject all unreceived messages, except on an active connection
     * (which disconnects locally & sends a 'FIN+' to peer)
     */
    while (sock->state != SS_DISCONNECTING) {
        buf = __skb_dequeue(&sk->sk_receive_queue);
        if (buf == NULL)
            break;
        atomic_dec(&tipc_queue_size);
        if (TIPC_SKB_CB(buf)->handle != 0)
            kfree_skb(buf);
        else {
            if ((sock->state == SS_CONNECTING) ||
                (sock->state == SS_CONNECTED)) {
                sock->state = SS_DISCONNECTING;
                tipc_disconnect(tport->ref);
            }
            tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
        }
    }

    /*
     * Delete TIPC port; this ensures no more messages are queued
     * (also disconnects an active connection & sends a 'FIN-' to peer)
     */
    res = tipc_deleteport(tport->ref);

    /* Discard any remaining (connection-based) messages in receive queue */
    discard_rx_queue(sk);

    /* Reject any messages that accumulated in backlog queue */
    sock->state = SS_DISCONNECTING;
    release_sock(sk);

    sock_put(sk);
    sock->sk = NULL;

    return res;
}
Developer ID: 0xroot, Project: Blackphone-BP1-Kernel, Lines of code: 61


Example 9: dccp_v4_err

//.........这里部分代码省略.........	if (!sk) {		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);		return;	}	if (sk->sk_state == DCCP_TIME_WAIT) {		inet_twsk_put(inet_twsk(sk));		return;	}	seq = dccp_hdr_seq(dh);	if (sk->sk_state == DCCP_NEW_SYN_RECV)		return dccp_req_err(sk, seq);	bh_lock_sock(sk);	/* If too many ICMPs get dropped on busy	 * servers this needs to be solved differently.	 */	if (sock_owned_by_user(sk))		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);	if (sk->sk_state == DCCP_CLOSED)		goto out;	dp = dccp_sk(sk);	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);		goto out;	}	switch (type) {	case ICMP_REDIRECT:		dccp_do_redirect(skb, sk);		goto out;	case ICMP_SOURCE_QUENCH:		/* Just silently ignore these. */		goto out;	case ICMP_PARAMETERPROB:		err = EPROTO;		break;	case ICMP_DEST_UNREACH:		if (code > NR_ICMP_UNREACH)			goto out;		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */			if (!sock_owned_by_user(sk))				dccp_do_pmtu_discovery(sk, iph, info);			goto out;		}		err = icmp_err_convert[code].errno;		break;	case ICMP_TIME_EXCEEDED:		err = EHOSTUNREACH;		break;	default:		goto out;	}	switch (sk->sk_state) {	case DCCP_REQUESTING:	case DCCP_RESPOND:		if (!sock_owned_by_user(sk)) {			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);			sk->sk_err = err;			sk->sk_error_report(sk);			dccp_done(sk);		} else			sk->sk_err_soft = err;		goto out;	}	/* If we've already connected we will keep trying	 * until we time out, or the user gives up.	 *	 * rfc1122 4.2.3.9 allows to consider as hard errors	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,	 * but it is obsoleted by pmtu discovery).	 *	 * Note, that in modern internet, where routing is unreliable	 * and in each dark corner broken firewalls sit, sending random	 * errors ordered by their masters even this two messages finally lose	 * their original sense (even Linux sends invalid PORT_UNREACHs)	 *	 * Now we are in compliance with RFCs.	 *							--ANK (980905)	 */	inet = inet_sk(sk);	if (!sock_owned_by_user(sk) && inet->recverr) {		sk->sk_err = err;		sk->sk_error_report(sk);	} else /* Only an error on timeout */		sk->sk_err_soft = err;out:	bh_unlock_sock(sk);	sock_put(sk);}
Developer ID: panyfx, Project: ath, Lines of code: 101


Example 10: vxlan_sock_put

static void vxlan_sock_put(struct sk_buff *skb)
{
    sock_put(skb->sk);
}
Developer ID: hisaki, Project: ovs, Lines of code: 4


Example 11: iscsi_sw_tcp_xmit_segment

//......... part of the code is omitted here .........
    tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
                                                 CRYPTO_ALG_ASYNC);
    tcp_sw_conn->rx_hash.flags = 0;
    if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
        goto free_tx_tfm;
    tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;

    return cls_conn;

free_tx_tfm:
    crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
free_conn:
    iscsi_conn_printk(KERN_ERR, conn,
                      "Could not create connection due to crc32c "
                      "loading error. Make sure the crc32c "
                      "module is built as a module or into the "
                      "kernel\n");
    iscsi_tcp_conn_teardown(cls_conn);
    return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
    struct iscsi_session *session = conn->session;
    struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
    struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
    struct socket *sock = tcp_sw_conn->sock;

    if (!sock)
        return;

    sock_hold(sock->sk);
    iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
    sock_put(sock->sk);

    spin_lock_bh(&session->lock);
    tcp_sw_conn->sock = NULL;
    spin_unlock_bh(&session->lock);
    sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
    struct iscsi_conn *conn = cls_conn->dd_data;
    struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
    struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

    iscsi_sw_tcp_release_conn(conn);

    if (tcp_sw_conn->tx_hash.tfm)
        crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
    if (tcp_sw_conn->rx_hash.tfm)
        crypto_free_hash(tcp_sw_conn->rx_hash.tfm);

    iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
    struct iscsi_conn *conn = cls_conn->dd_data;
    struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
    struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

    /* userspace may have goofed up and not bound us */
    if (!tcp_sw_conn->sock)
        return;
Developer ID: 458941968, Project: mini2440-kernel-2.6.29, Lines of code: 67


Example 12: dccp_close

void dccp_close(struct sock *sk, long timeout){	struct dccp_sock *dp = dccp_sk(sk);	struct sk_buff *skb;	u32 data_was_unread = 0;	int state;	lock_sock(sk);	sk->sk_shutdown = SHUTDOWN_MASK;	if (sk->sk_state == DCCP_LISTEN) {		dccp_set_state(sk, DCCP_CLOSED);		/* Special case. */		inet_csk_listen_stop(sk);		goto adjudge_to_death;	}	sk_stop_timer(sk, &dp->dccps_xmit_timer);	/*	 * We need to flush the recv. buffs.  We do this only on the	 * descriptor close, not protocol-sourced closes, because the	  *reader process may not have drained the data yet!	 */	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {		data_was_unread += skb->len;		__kfree_skb(skb);	}	if (data_was_unread) {		/* Unread data was tossed, send an appropriate Reset Code */		DCCP_WARN("ABORT with %u bytes unread/n", data_was_unread);		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);		dccp_set_state(sk, DCCP_CLOSED);	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {		/* Check zero linger _after_ checking for unread data. */		sk->sk_prot->disconnect(sk, 0);	} else if (sk->sk_state != DCCP_CLOSED) {		/*		 * Normal connection termination. May need to wait if there are		 * still packets in the TX queue that are delayed by the CCID.		 */		dccp_flush_write_queue(sk, &timeout);		dccp_terminate_connection(sk);	}	/*	 * Flush write queue. This may be necessary in several cases:	 * - we have been closed by the peer but still have application data;	 * - abortive termination (unread data or zero linger time),	 * - normal termination but queue could not be flushed within time limit	 */	__skb_queue_purge(&sk->sk_write_queue);	sk_stream_wait_close(sk, timeout);adjudge_to_death:	state = sk->sk_state;	sock_hold(sk);	sock_orphan(sk);	/*	 * It is the last release_sock in its life. It will remove backlog.	 */	release_sock(sk);	/*	 * Now socket is owned by kernel and we acquire BH lock	 * to finish close. No need to check for user refs.	 */	local_bh_disable();	bh_lock_sock(sk);	WARN_ON(sock_owned_by_user(sk));	percpu_counter_inc(sk->sk_prot->orphan_count);	/* Have we already been destroyed by a softirq or backlog? */	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)		goto out;	if (sk->sk_state == DCCP_CLOSED)		inet_csk_destroy_sock(sk);	/* Otherwise, socket is reprieved until protocol close. */out:	bh_unlock_sock(sk);	local_bh_enable();	sock_put(sk);}
Developer ID: 7799, Project: linux, Lines of code: 92


Example 13: MksckPageDescIoctl

static int
MksckPageDescIoctl(struct socket *sock,
                   unsigned int cmd,
                   unsigned long arg)
{
    struct sock *mksck = NULL;
    struct sock *sk = sock->sk;
    struct MksckPageDescInfo *mpdi;
    unsigned long ul[2];
    int retval = 0;

    switch (cmd) {
    case MKSCK_DETACH:
        lock_sock(sk);
        mpdi = sk->sk_protinfo;

        if (copy_from_user(ul, (void *)arg, sizeof(ul))) {
            retval = -EFAULT;
        } else if (!mpdi || !sk->sk_user_data) {
            retval = -EINVAL;
        } else {
            uint32 flags = calc_vm_prot_bits(ul[0]);
            ul[0] = 0;

            while (mpdi) {
                struct MksckPageDescInfo *next = mpdi->next;
                ul[0] += MksckPageDescManage(mpdi->descs,
                                             mpdi->pages,
                                             MANAGE_COUNT);
                mpdi->mapCounts = ul[1];
                mpdi = next;
            }

            if (copy_to_user((void *)arg, ul, sizeof(ul[0]))) {
                retval = -EFAULT;
            } else {
                mpdi = sk->sk_protinfo;
                mpdi->flags = flags;
                mksck = (struct sock *)sk->sk_user_data;
                sk->sk_user_data = NULL;
            }
        }
        release_sock(sk);

        sk = mksck;
        if (sk) {
            lock_sock(sk);
            sock_kfree_s(sk, sk->sk_user_data, sizeof(int));
            sk->sk_user_data = NULL;
            release_sock(sk);
            sock_put(sk);
        }
        break;

    default:
        retval = -EINVAL;
        break;
    }

    return retval;
}
Developer ID: Alex-V2, Project: One_M8_4.4.3_kernel, Lines of code: 61


Example 14: dccp_v6_err

//.........这里部分代码省略.........		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);	if (sk->sk_state == DCCP_CLOSED)		goto out;	dp = dccp_sk(sk);	seq = dccp_hdr_seq(dh);	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);		goto out;	}	np = inet6_sk(sk);	if (type == NDISC_REDIRECT) {		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);		if (dst)			dst->ops->redirect(dst, sk, skb);		goto out;	}	if (type == ICMPV6_PKT_TOOBIG) {		struct dst_entry *dst = NULL;		if (!ip6_sk_accept_pmtu(sk))			goto out;		if (sock_owned_by_user(sk))			goto out;		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))			goto out;		dst = inet6_csk_update_pmtu(sk, ntohl(info));		if (!dst)			goto out;		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))			dccp_sync_mss(sk, dst_mtu(dst));		goto out;	}	icmpv6_err_convert(type, code, &err);	/* Might be for an request_sock */	switch (sk->sk_state) {		struct request_sock *req, **prev;	case DCCP_LISTEN:		if (sock_owned_by_user(sk))			goto out;		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,					   &hdr->daddr, &hdr->saddr,					   inet6_iif(skb));		if (req == NULL)			goto out;		/*		 * ICMPs are not backlogged, hence we cannot get an established		 * socket here.		 */		WARN_ON(req->sk != NULL);		if (!between48(seq, dccp_rsk(req)->dreq_iss,				    dccp_rsk(req)->dreq_gss)) {			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);			goto out;		}		inet_csk_reqsk_queue_drop(sk, req, prev);		goto out;	case DCCP_REQUESTING:	case DCCP_RESPOND:  /* Cannot happen.			       It can, it SYNs are crossed. --ANK */		if (!sock_owned_by_user(sk)) {			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);			sk->sk_err = err;			/*			 * Wake people up to see the error			 * (see connect in sock.c)			 */			sk->sk_error_report(sk);			dccp_done(sk);		} else			sk->sk_err_soft = err;		goto out;	}	if (!sock_owned_by_user(sk) && np->recverr) {		sk->sk_err = err;		sk->sk_error_report(sk);	} else		sk->sk_err_soft = err;out:	bh_unlock_sock(sk);	sock_put(sk);}
Developer ID: spacex, Project: kernel-centos7, Lines of code: 101


Example 15: dccp_v6_rcv

static int dccp_v6_rcv(struct sk_buff *skb){	const struct dccp_hdr *dh;	struct sock *sk;	int min_cov;	/* Step 1: Check header basics */	if (dccp_invalid_packet(skb))		goto discard_it;	/* Step 1: If header checksum is incorrect, drop packet and return. */	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,				     &ipv6_hdr(skb)->daddr)) {		DCCP_WARN("dropped packet with invalid checksum/n");		goto discard_it;	}	dh = dccp_hdr(skb);	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;	if (dccp_packet_without_ack(skb))		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;	else		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);	/* Step 2:	 *	Look up flow ID in table and get corresponding socket */	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,			        dh->dccph_sport, dh->dccph_dport);	/*	 * Step 2:	 *	If no socket ...	 */	if (sk == NULL) {		dccp_pr_debug("failed to look up flow ID in table and "			      "get corresponding socket/n");		goto no_dccp_socket;	}	/*	 * Step 2:	 *	... or S.state == TIMEWAIT,	 *		Generate Reset(No Connection) unless P.type == Reset	 *		Drop packet and return	 */	if (sk->sk_state == DCCP_TIME_WAIT) {		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait/n");		inet_twsk_put(inet_twsk(sk));		goto no_dccp_socket;	}	/*	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov	 */	min_cov = dccp_sk(sk)->dccps_pcrlen;	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d/n",			      dh->dccph_cscov, min_cov);		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */		goto discard_and_relse;	}	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))		goto discard_and_relse;	return sk_receive_skb(sk, skb, 1) ? -1 : 0;no_dccp_socket:	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))		goto discard_it;	/*	 * Step 2:	 *	If no socket ...	 *		Generate Reset(No Connection) unless P.type == Reset	 *		Drop packet and return	 */	if (dh->dccph_type != DCCP_PKT_RESET) {		DCCP_SKB_CB(skb)->dccpd_reset_code =					DCCP_RESET_CODE_NO_CONNECTION;		dccp_v6_ctl_send_reset(sk, skb);	}discard_it:	kfree_skb(skb);	return 0;discard_and_relse:	sock_put(sk);	goto discard_it;}
Developer ID: spacex, Project: kernel-centos7, Lines of code: 95


Example 16: dccp_v6_err

//.........这里部分代码省略.........			goto out;		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))			goto out;		/* icmp should have updated the destination cache entry */		dst = __sk_dst_check(sk, np->dst_cookie);		if (dst == NULL) {			struct inet_sock *inet = inet_sk(sk);			struct flowi fl;			/* BUGGG_FUTURE: Again, it is not clear how			   to handle rthdr case. Ignore this complexity			   for now.			 */			memset(&fl, 0, sizeof(fl));			fl.proto = IPPROTO_DCCP;			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);			ipv6_addr_copy(&fl.fl6_src, &np->saddr);			fl.oif = sk->sk_bound_dev_if;			fl.fl_ip_dport = inet->dport;			fl.fl_ip_sport = inet->sport;			security_sk_classify_flow(sk, &fl);			err = ip6_dst_lookup(sk, &dst, &fl);			if (err) {				sk->sk_err_soft = -err;				goto out;			}			err = xfrm_lookup(net, &dst, &fl, sk, 0);			if (err < 0) {				sk->sk_err_soft = -err;				goto out;			}		} else			dst_hold(dst);		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {			dccp_sync_mss(sk, dst_mtu(dst));		} /* else let the usual retransmit timer handle it */		dst_release(dst);		goto out;	}	icmpv6_err_convert(type, code, &err);	/* Might be for an request_sock */	switch (sk->sk_state) {		struct request_sock *req, **prev;	case DCCP_LISTEN:		if (sock_owned_by_user(sk))			goto out;		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,					   &hdr->daddr, &hdr->saddr,					   inet6_iif(skb));		if (req == NULL)			goto out;		/*		 * ICMPs are not backlogged, hence we cannot get an established		 * socket here.		 */		WARN_ON(req->sk != NULL);		if (seq != dccp_rsk(req)->dreq_iss) {			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);			goto out;		}		inet_csk_reqsk_queue_drop(sk, req, prev);		goto out;	case DCCP_REQUESTING:	case DCCP_RESPOND:  /* Cannot happen.			       It can, it SYNs are crossed. --ANK */		if (!sock_owned_by_user(sk)) {			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);			sk->sk_err = err;			/*			 * Wake people up to see the error			 * (see connect in sock.c)			 */			sk->sk_error_report(sk);			dccp_done(sk);		} else			sk->sk_err_soft = err;		goto out;	}	if (!sock_owned_by_user(sk) && np->recverr) {		sk->sk_err = err;		sk->sk_error_report(sk);	} else		sk->sk_err_soft = err;out:	bh_unlock_sock(sk);	sock_put(sk);}
Developer ID: AppEngine, Project: linux-2.6, Lines of code: 101


Example 17: tcp_v6_err

//.........这里部分代码省略.........	np = inet6_sk(sk);	if (type == ICMPV6_PKT_TOOBIG) {		struct dst_entry *dst = NULL;		if (sock_owned_by_user(sk))			goto out;		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))			goto out;		/* icmp should have updated the destination cache entry */		dst = __sk_dst_check(sk, np->dst_cookie);		if (dst == NULL) {			struct inet_sock *inet = inet_sk(sk);			struct flowi fl;			/* BUGGG_FUTURE: Again, it is not clear how			   to handle rthdr case. Ignore this complexity			   for now.			 */			memset(&fl, 0, sizeof(fl));			fl.proto = IPPROTO_TCP;			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);			ipv6_addr_copy(&fl.fl6_src, &np->saddr);			fl.oif = sk->sk_bound_dev_if;			fl.fl_ip_dport = inet->dport;			fl.fl_ip_sport = inet->sport;			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {				sk->sk_err_soft = -err;				goto out;			}			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {				sk->sk_err_soft = -err;				goto out;			}		} else			dst_hold(dst);		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {			tcp_sync_mss(sk, dst_mtu(dst));			tcp_simple_retransmit(sk);		} /* else let the usual retransmit timer handle it */		dst_release(dst);		goto out;	}	icmpv6_err_convert(type, code, &err);	/* Might be for an request_sock */	switch (sk->sk_state) {		struct request_sock *req, **prev;	case TCP_LISTEN:		if (sock_owned_by_user(sk))			goto out;		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,					   &hdr->saddr, inet6_iif(skb));		if (!req)			goto out;		/* ICMPs are not backlogged, hence we cannot get		 * an established socket here.		 */		BUG_TRAP(req->sk == NULL);		if (seq != tcp_rsk(req)->snt_isn) {			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);			goto out;		}		inet_csk_reqsk_queue_drop(sk, req, prev);		goto out;	case TCP_SYN_SENT:	case TCP_SYN_RECV:  /* Cannot happen.			       It can, it SYNs are crossed. --ANK */ 		if (!sock_owned_by_user(sk)) {			sk->sk_err = err;			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */			tcp_done(sk);		} else			sk->sk_err_soft = err;		goto out;	}	if (!sock_owned_by_user(sk) && np->recverr) {		sk->sk_err = err;		sk->sk_error_report(sk);	} else		sk->sk_err_soft = err;out:	bh_unlock_sock(sk);	sock_put(sk);}
Developer ID: mrtos, Project: Logitech-Revue, Lines of code: 101


Example 18: llc_conn_state_process

/** *	llc_conn_state_process - sends event to connection state machine *	@sk: connection *	@skb: occurred event * *	Sends an event to connection state machine. After processing event *	(executing it's actions and changing state), upper layer will be *	indicated or confirmed, if needed. Returns 0 for success, 1 for *	failure. The socket lock has to be held before calling this function. */int llc_conn_state_process(struct sock *sk, struct sk_buff *skb){	int rc;	struct llc_sock *llc = llc_sk(skb->sk);	struct llc_conn_state_ev *ev = llc_conn_ev(skb);	/*	 * We have to hold the skb, because llc_conn_service will kfree it in	 * the sending path and we need to look at the skb->cb, where we encode	 * llc_conn_state_ev.	 */	skb_get(skb);	ev->ind_prim = ev->cfm_prim = 0;	/*	 * Send event to state machine	 */	rc = llc_conn_service(skb->sk, skb);	if (unlikely(rc != 0)) {;		goto out_kfree_skb;	}	if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {		/* indicate or confirm not required */		if (!skb->next)			goto out_kfree_skb;		goto out_skb_put;	}	if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */		skb_get(skb);	switch (ev->ind_prim) {	case LLC_DATA_PRIM:		llc_save_primitive(sk, skb, LLC_DATA_PRIM);		if (unlikely(sock_queue_rcv_skb(sk, skb))) {			/*			 * shouldn't happen			 *///			printk(KERN_ERR "%s: sock_queue_rcv_skb failed!/n",;			kfree_skb(skb);		}		break;	case LLC_CONN_PRIM:		/*		 * Can't be sock_queue_rcv_skb, because we have to leave the		 * skb->sk pointing to the newly created struct sock in		 * llc_conn_handler. -acme		 */		skb_queue_tail(&sk->sk_receive_queue, skb);		sk->sk_state_change(sk);		break;	case LLC_DISC_PRIM:		sock_hold(sk);		if (sk->sk_type == SOCK_STREAM &&		    sk->sk_state == TCP_ESTABLISHED) {			sk->sk_shutdown       = SHUTDOWN_MASK;			sk->sk_socket->state  = SS_UNCONNECTED;			sk->sk_state          = TCP_CLOSE;			if (!sock_flag(sk, SOCK_DEAD)) {				sock_set_flag(sk, SOCK_DEAD);				sk->sk_state_change(sk);			}		}		kfree_skb(skb);		sock_put(sk);		break;	case LLC_RESET_PRIM:		/*		 * FIXME:		 * RESET is not being notified to upper layers for now		 */;		kfree_skb(skb);		break;	default:		if (ev->ind_prim) {//			printk(KERN_INFO "%s: received unknown %d prim!/n",;			kfree_skb(skb);		}		/* No indication */		break;	}	switch (ev->cfm_prim) {	case LLC_DATA_PRIM:		if (!llc_data_accept_state(llc->state))			sk->sk_write_space(sk);//.........这里部分代码省略.........
Developer ID: nos1609, Project: Chrono_Kernel-1, Lines of code: 101


Example 19: __vsock_remove_bound

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
    list_del_init(&vsk->bound_table);
    sock_put(&vsk->sk);
}
Developer ID: SantoshShilimkar, Project: linux, Lines of code: 5


Example 20: dccp_v4_err

//.........这里部分代码省略.........	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);		goto out;	}	switch (type) {	case ICMP_SOURCE_QUENCH:		/* Just silently ignore these. */		goto out;	case ICMP_PARAMETERPROB:		err = EPROTO;		break;	case ICMP_DEST_UNREACH:		if (code > NR_ICMP_UNREACH)			goto out;		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */			if (!sock_owned_by_user(sk))				dccp_do_pmtu_discovery(sk, iph, info);			goto out;		}		err = icmp_err_convert[code].errno;		break;	case ICMP_TIME_EXCEEDED:		err = EHOSTUNREACH;		break;	default:		goto out;	}	switch (sk->sk_state) {		struct request_sock *req , **prev;	case DCCP_LISTEN:		if (sock_owned_by_user(sk))			goto out;		req = inet_csk_search_req(sk, &prev, dh->dccph_dport,					  iph->daddr, iph->saddr);		if (!req)			goto out;		/*		 * ICMPs are not backlogged, hence we cannot get an established		 * socket here.		 */		WARN_ON(req->sk);		if (seq != dccp_rsk(req)->dreq_iss) {			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);			goto out;		}		/*		 * Still in RESPOND, just remove it silently.		 * There is no good way to pass the error to the newly		 * created socket, and POSIX does not want network		 * errors returned from accept().		 */		inet_csk_reqsk_queue_drop(sk, req, prev);		goto out;	case DCCP_REQUESTING:	case DCCP_RESPOND:		if (!sock_owned_by_user(sk)) {			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);			sk->sk_err = err;			sk->sk_error_report(sk);			dccp_done(sk);		} else			sk->sk_err_soft = err;		goto out;	}	/* If we've already connected we will keep trying	 * until we time out, or the user gives up.	 *	 * rfc1122 4.2.3.9 allows to consider as hard errors	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,	 * but it is obsoleted by pmtu discovery).	 *	 * Note, that in modern internet, where routing is unreliable	 * and in each dark corner broken firewalls sit, sending random	 * errors ordered by their masters even this two messages finally lose	 * their original sense (even Linux sends invalid PORT_UNREACHs)	 *	 * Now we are in compliance with RFCs.	 *							--ANK (980905)	 */	inet = inet_sk(sk);	if (!sock_owned_by_user(sk) && inet->recverr) {		sk->sk_err = err;		sk->sk_error_report(sk);	} else /* Only an error on timeout */		sk->sk_err_soft = err;out:	bh_unlock_sock(sk);	sock_put(sk);}
Developer ID: qwerty1023, Project: wive-rtnl-firmware, Lines of code: 101


Example 21: ax25_rcv

static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,	ax25_address *dev_addr, struct packet_type *ptype){	ax25_address src, dest, *next_digi = NULL;	int type = 0, mine = 0, dama;	struct sock *make, *sk;	ax25_digi dp, reverse_dp;	ax25_cb *ax25;	ax25_dev *ax25_dev;	/*	 *	Process the AX.25/LAPB frame.	 */	skb_reset_transport_header(skb);	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {		kfree_skb(skb);		return 0;	}	/*	 *	Parse the address header.	 */	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {		kfree_skb(skb);		return 0;	}	/*	 *	Ours perhaps ?	 */	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */		next_digi = &dp.calls[dp.lastrepeat + 1];	/*	 *	Pull of the AX.25 headers leaving the CTRL/PID bytes	 */	skb_pull(skb, ax25_addr_size(&dp));	/* For our port addresses ? */	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)		mine = 1;	/* Also match on any registered callsign from L3/4 */	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)		mine = 1;	/* UI frame - bypass LAPB processing */	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {		skb_set_transport_header(skb, 2); /* skip control and pid */		ax25_send_to_raw(&dest, skb, skb->data[1]);		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {			kfree_skb(skb);			return 0;		}		/* Now we are pointing at the pid byte */		switch (skb->data[1]) {		case AX25_P_IP:			skb_pull(skb,2);		/* drop PID/CTRL */			skb_reset_transport_header(skb);			skb_reset_network_header(skb);			skb->dev      = dev;			skb->pkt_type = PACKET_HOST;			skb->protocol = htons(ETH_P_IP);			netif_rx(skb);			break;		case AX25_P_ARP:			skb_pull(skb,2);			skb_reset_transport_header(skb);			skb_reset_network_header(skb);			skb->dev      = dev;			skb->pkt_type = PACKET_HOST;			skb->protocol = htons(ETH_P_ARP);			netif_rx(skb);			break;		case AX25_P_TEXT:			/* Now find a suitable dgram socket */			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);			if (sk != NULL) {				bh_lock_sock(sk);				if (atomic_read(&sk->sk_rmem_alloc) >=				    sk->sk_rcvbuf) {					kfree_skb(skb);				} else {					/*					 *	Remove the control and PID.					 */					skb_pull(skb, 2);					if (sock_queue_rcv_skb(sk, skb) != 0)						kfree_skb(skb);				}				bh_unlock_sock(sk);				sock_put(sk);			} else {//.........这里部分代码省略.........
Developer ID: robacklin, Project: nxc2620, Lines of code: 101


Example 22: vsock_accept

static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,			bool kern){	struct sock *listener;	int err;	struct sock *connected;	struct vsock_sock *vconnected;	long timeout;	DEFINE_WAIT(wait);	err = 0;	listener = sock->sk;	lock_sock(listener);	if (sock->type != SOCK_STREAM) {		err = -EOPNOTSUPP;		goto out;	}	if (listener->sk_state != TCP_LISTEN) {		err = -EINVAL;		goto out;	}	/* Wait for children sockets to appear; these are the new sockets	 * created upon connection establishment.	 */	timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);	while ((connected = vsock_dequeue_accept(listener)) == NULL &&	       listener->sk_err == 0) {		release_sock(listener);		timeout = schedule_timeout(timeout);		finish_wait(sk_sleep(listener), &wait);		lock_sock(listener);		if (signal_pending(current)) {			err = sock_intr_errno(timeout);			goto out;		} else if (timeout == 0) {			err = -EAGAIN;			goto out;		}		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);	}	finish_wait(sk_sleep(listener), &wait);	if (listener->sk_err)		err = -listener->sk_err;	if (connected) {		listener->sk_ack_backlog--;		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);		vconnected = vsock_sk(connected);		/* If the listener socket has received an error, then we should		 * reject this socket and return.  Note that we simply mark the		 * socket rejected, drop our reference, and let the cleanup		 * function handle the cleanup; the fact that we found it in		 * the listener's accept queue guarantees that the cleanup		 * function hasn't run yet.		 */		if (err) {			vconnected->rejected = true;		} else {			newsock->state = SS_CONNECTED;			sock_graft(connected, newsock);		}		release_sock(connected);		sock_put(connected);	}out:	release_sock(listener);	return err;}
Developer ID: SantoshShilimkar, Project: linux, Lines of code: 81


Example 23: dn_nsp_rx_packet

static int dn_nsp_rx_packet(struct sk_buff *skb){	struct dn_skb_cb *cb = DN_SKB_CB(skb);	struct sock *sk = NULL;	unsigned char *ptr = (unsigned char *)skb->data;	unsigned short reason = NSP_REASON_NL;	skb->h.raw    = skb->data;	cb->nsp_flags = *ptr++;	if (decnet_debug_level & 2)		printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x/n", (int)cb->nsp_flags);	if (skb->len < 2) 		goto free_out;	if (cb->nsp_flags & 0x83) 		goto free_out;	/*	 * Returned packets...	 * Swap src & dst and look up in the normal way.	 */	if (cb->rt_flags & DN_RT_F_RTS) {		unsigned short tmp = cb->dst_port;		cb->dst_port = cb->src_port;		cb->src_port = tmp;		tmp = cb->dst;		cb->dst = cb->src;		cb->src = tmp;		sk = dn_find_by_skb(skb);		goto got_it;	}	/*	 * Filter out conninits and useless packet types	 */	if ((cb->nsp_flags & 0x0c) == 0x08) {		switch(cb->nsp_flags & 0x70) {			case 0x00: /* NOP */			case 0x70: /* Reserved */			case 0x50: /* Reserved, Phase II node init */				goto free_out;			case 0x10:			case 0x60:				sk = dn_find_listener(skb, &reason);				goto got_it;		}	}	if (skb->len < 3)		goto free_out;	/*	 * Grab the destination address.	 */	cb->dst_port = *(unsigned short *)ptr;	cb->src_port = 0;	ptr += 2;	/*	 * If not a connack, grab the source address too.	 */	if (skb->len >= 5) {		cb->src_port = *(unsigned short *)ptr;		ptr += 2;		skb_pull(skb, 5);	}	/*	 * Find the socket to which this skb is destined.	 */	sk = dn_find_by_skb(skb);got_it:	if (sk != NULL) {		struct dn_scp *scp = DN_SK(sk);		int ret;		/* Reset backoff */		scp->nsp_rxtshift = 0;		bh_lock_sock(sk);		ret = NET_RX_SUCCESS;		if (decnet_debug_level & 8)			printk(KERN_DEBUG "NSP: 0x%02x 0x%02x 0x%04x 0x%04x %d/n",				(int)cb->rt_flags, (int)cb->nsp_flags, 				(int)cb->src_port, (int)cb->dst_port, 				(int)sk->lock.users);		if (sk->lock.users == 0)			ret = dn_nsp_backlog_rcv(sk, skb);		else			sk_add_backlog(sk, skb);		bh_unlock_sock(sk);		sock_put(sk);		return ret;	}	return dn_nsp_no_socket(skb, reason);//.........这里部分代码省略.........
Developer ID: iwangv, Project: edimax-br-6528n, Lines of code: 101


Example 24: __vsock_remove_connected

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
    list_del_init(&vsk->connected_table);
    sock_put(&vsk->sk);
}
Developer ID: SantoshShilimkar, Project: linux, Lines of code: 5


Example 25: tcp_v6_err

//.........这里部分代码省略.........	tp = tcp_sk(sk);	seq = ntohl(th->seq);	if (sk->sk_state != TCP_LISTEN &&	    !between(seq, tp->snd_una, tp->snd_nxt)) {		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);		goto out;	}	np = inet6_sk(sk);	if (type == ICMPV6_PKT_TOOBIG) {		struct dst_entry *dst;		if (sock_owned_by_user(sk))			goto out;		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))			goto out;				dst = __sk_dst_check(sk, np->dst_cookie);		if (dst == NULL) {			struct inet_sock *inet = inet_sk(sk);			struct flowi6 fl6;			memset(&fl6, 0, sizeof(fl6));			fl6.flowi6_proto = IPPROTO_TCP;			fl6.daddr = np->daddr;			fl6.saddr = np->saddr;			fl6.flowi6_oif = sk->sk_bound_dev_if;			fl6.flowi6_mark = sk->sk_mark;			fl6.fl6_dport = inet->inet_dport;			fl6.fl6_sport = inet->inet_sport;			fl6.flowi6_uid = sock_i_uid(sk);			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);			if (IS_ERR(dst)) {				sk->sk_err_soft = -PTR_ERR(dst);				goto out;			}		} else			dst_hold(dst);		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {			tcp_sync_mss(sk, dst_mtu(dst));			tcp_simple_retransmit(sk);		} 		dst_release(dst);		goto out;	}	icmpv6_err_convert(type, code, &err);		switch (sk->sk_state) {		struct request_sock *req, **prev;	case TCP_LISTEN:		if (sock_owned_by_user(sk))			goto out;		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,					   &hdr->saddr, inet6_iif(skb));		if (!req)			goto out;		WARN_ON(req->sk != NULL);		if (seq != tcp_rsk(req)->snt_isn) {			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);			goto out;		}		inet_csk_reqsk_queue_drop(sk, req, prev);		goto out;	case TCP_SYN_SENT:	case TCP_SYN_RECV:  		if (!sock_owned_by_user(sk)) {			sk->sk_err = err;			sk->sk_error_report(sk);					tcp_done(sk);		} else			sk->sk_err_soft = err;		goto out;	}	if (!sock_owned_by_user(sk) && np->recverr) {		sk->sk_err = err;		sk->sk_error_report(sk);	} else		sk->sk_err_soft = err;out:	bh_unlock_sock(sk);	sock_put(sk);}
Developer ID: Leoyzen, Project: Charm-Eye, Lines of code: 101


Example 26: dccp_close

void dccp_close(struct sock *sk, long timeout){	struct dccp_sock *dp = dccp_sk(sk);	struct sk_buff *skb;	u32 data_was_unread = 0;	int state;	lock_sock(sk);	sk->sk_shutdown = SHUTDOWN_MASK;	if (sk->sk_state == DCCP_LISTEN) {		dccp_set_state(sk, DCCP_CLOSED);		/* Special case. */		inet_csk_listen_stop(sk);		goto adjudge_to_death;	}	sk_stop_timer(sk, &dp->dccps_xmit_timer);	/*	 * We need to flush the recv. buffs.  We do this only on the	 * descriptor close, not protocol-sourced closes, because the	  *reader process may not have drained the data yet!	 */	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {		data_was_unread += skb->len;		__kfree_skb(skb);	}	if (data_was_unread) {		/* Unread data was tossed, send an appropriate Reset Code */		DCCP_WARN("DCCP: ABORT -- %u bytes unread/n", data_was_unread);		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);		dccp_set_state(sk, DCCP_CLOSED);	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {		/* Check zero linger _after_ checking for unread data. */		sk->sk_prot->disconnect(sk, 0);	} else if (sk->sk_state != DCCP_CLOSED) {		dccp_terminate_connection(sk);	}	sk_stream_wait_close(sk, timeout);adjudge_to_death:	state = sk->sk_state;	sock_hold(sk);	sock_orphan(sk);	atomic_inc(sk->sk_prot->orphan_count);	/*	 * It is the last release_sock in its life. It will remove backlog.	 */	release_sock(sk);	/*	 * Now socket is owned by kernel and we acquire BH lock	 * to finish close. No need to check for user refs.	 */	local_bh_disable();	bh_lock_sock(sk);	BUG_TRAP(!sock_owned_by_user(sk));	/* Have we already been destroyed by a softirq or backlog? */	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)		goto out;	if (sk->sk_state == DCCP_CLOSED)		inet_csk_destroy_sock(sk);	/* Otherwise, socket is reprieved until protocol close. */out:	bh_unlock_sock(sk);	local_bh_enable();	sock_put(sk);}
Developer ID: miettal, Project: armadillo420_standard, Lines of code: 78


Example 27: dccp_v4_rcv

/* this is called when real data arrives */
static int dccp_v4_rcv(struct sk_buff *skb)
{
    const struct dccp_hdr *dh;
    const struct iphdr *iph;
    struct sock *sk;
    int min_cov;

    /* Step 1: Check header basics */

    if (dccp_invalid_packet(skb))
        goto discard_it;

    iph = ip_hdr(skb);
    /* Step 1: If header checksum is incorrect, drop packet and return */
    if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
        DCCP_WARN("dropped packet with invalid checksum\n");
        goto discard_it;
    }

    dh = dccp_hdr(skb);

    DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
    DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

    dccp_pr_debug("%8.8s src=%[email