
Self-study tutorial: C++ u64_stats_update_begin function code examples

51自学网 2021-06-03 09:02:56
  C++
This tutorial on u64_stats_update_begin code examples is meant to be practical; we hope it helps you.

This article collects and summarizes typical usage examples of the u64_stats_update_begin function. If you have been wondering what exactly u64_stats_update_begin does, how it is called, and what real-world usage looks like, the hand-picked examples below should help. Note that although this site files the topic under C++, u64_stats_update_begin is a Linux kernel C helper declared in <linux/u64_stats_sync.h>, and all of the examples are taken from kernel or kernel-derived sources.

A total of 27 code examples of the u64_stats_update_begin function are shown below, ordered roughly by popularity, so the examples readers have found most useful appear first.
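Before diving into the examples, here is a minimal sketch of the pattern every one of them follows: on the writer side (usually a per-CPU hot path such as packet RX/TX), u64_stats_update_begin()/u64_stats_update_end() from <linux/u64_stats_sync.h> bracket the increments of 64-bit counters so that a concurrent reader on a 32-bit kernel can never observe a half-updated value; on the reader side (for example ndo_get_stats64), the counters are sampled inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop. The struct and function names below (my_pcpu_stats, my_stats_add_rx, my_stats_read_one) are hypothetical and chosen only for illustration; this is a sketch modeled on the examples, not code from any of the projects quoted here, and being kernel code it only compiles inside a kernel build.

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counter block; each driver below has its own
 * equivalent (vlan_pcpu_stats, pcpu_sw_netstats, netvsc_stats, ...).
 * The syncp member must be initialized once with u64_stats_init().
 */
struct my_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* Writer side: runs on the CPU that owns this per-CPU slot (e.g. in the
 * RX softirq). The begin/end pair marks the update so 32-bit readers can
 * detect that they raced with a writer.
 */
static void my_stats_add_rx(struct my_pcpu_stats __percpu *pcpu_stats,
			    unsigned int len)
{
	struct my_pcpu_stats *stats = this_cpu_ptr(pcpu_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: retry until a consistent snapshot of one CPU's counters is
 * obtained (typically summed over all CPUs in ndo_get_stats64()).
 */
static void my_stats_read_one(const struct my_pcpu_stats *stats,
			      u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->rx_packets;
		*bytes   = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}

On 64-bit kernels the sync object is (roughly) empty and these calls compile down to little or nothing; the seqcount-style retry only does real work where a u64 cannot be read atomically, which is exactly why every example below wraps its counter updates this way.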

Example 1: vlan_dev_hard_start_xmit

static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (veth->h_vlan_proto != vlan->vlan_proto ||
	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
		u16 vlan_tci;
		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
	}

	skb->dev = vlan->real_dev;
	len = skb->len;
	if (unlikely(netpoll_tx_running(dev)))
		return vlan_netpoll_send_skb(vlan, skb);

	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *stats;

		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
	}

	return ret;
}
Developer: RafaelFazzolino, Project: linux, Lines: 42


Example 2: ovs_vport_send

/**
 *	ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent.  Either ovs
 * lock or rcu_read_lock must be held.
 */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent = vport->ops->send(vport, skb);

	if (likely(sent > 0)) {
		struct pcpu_sw_netstats *stats;

		stats = this_cpu_ptr(vport->percpu_stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		u64_stats_update_end(&stats->syncp);
	} else if (sent < 0) {
		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
	} else {
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	}
	return sent;
}
Developer: Flipkart, Project: linux, Lines: 29


Example 3: nft_update_chain_stats

static noinline void nft_update_chain_stats(const struct nft_chain *chain,
					    const struct nft_pktinfo *pkt)
{
	struct nft_base_chain *base_chain;
	struct nft_stats *stats;

	base_chain = nft_base_chain(chain);
	if (!base_chain->stats)
		return;

	local_bh_disable();
	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
	if (stats) {
		u64_stats_update_begin(&stats->syncp);
		stats->pkts++;
		stats->bytes += pkt->skb->len;
		u64_stats_update_end(&stats->syncp);
	}
	local_bh_enable();
}
Developer: krzk, Project: linux, Lines: 20


Example 4: br_pass_frame_up

/* Bridge group multicast address 802.1d (pg 51). */
const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

int qos_enable = 0;		/* added pling 03/13/2007 */

static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	indev = skb->dev;
	/* wklin added, 2010/06/15 @attach_dev */
	if (htons(ETH_P_ARP) == eth_hdr(skb)->h_proto)
	    *(pp_bridge_indev(skb)) = indev; /* backup incoming port to be used in arp.c */
	skb->dev = brdev;

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
		       netif_receive_skb);
}
Developer: jhu-chang, Project: r6300v2, Lines: 24


Example 5: internal_dev_xmit

/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int len, err;

	len = skb->len;
	rcu_read_lock();
	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();

	if (likely(!err)) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		netdev->stats.tx_errors++;
	}
	return 0;
}
Developer: 020gzh, Project: linux, Lines: 22


Example 6: vti_rcv_cb

static int vti_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct pcpu_sw_netstats *tstats;
	struct xfrm_state *x;
	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;

	if (!tunnel)
		return 1;

	dev = tunnel->dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	x = xfrm_input_state(skb);
	family = x->inner_mode->afinfo->family;

	if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
	skb->dev = dev;

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}
Developer: 383530895, Project: linux, Lines: 38


Example 7: ovs_dp_upcall

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->pid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	forward_ip_summed(skb, true);

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
Developer: axray, Project: dataware.dreamplug, Lines: 38


Example 8: ovs_vport_receive

/**
 *	ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_key: tunnel (if any) that carried packet
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		       const struct ovs_tunnel_info *tun_info)
{
	struct pcpu_sw_netstats *stats;
	struct sw_flow_key key;
	int error;

	stats = this_cpu_ptr(vport->percpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
	u64_stats_update_end(&stats->syncp);

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->egress_tun_info = NULL;
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}
	ovs_dp_process_packet(skb, &key);
}
Developer: Flipkart, Project: linux, Lines: 33


Example 9: loopback_xmit

/*
 * The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct pcpu_lstats *lb_stats;
	int len;

	skb_orphan(skb);

	skb->protocol = eth_type_trans(skb, dev);

	/* it's OK to use per_cpu_ptr() because BHs are off */
	lb_stats = this_cpu_ptr(dev->lstats);

	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
		u64_stats_update_begin(&lb_stats->syncp);
		lb_stats->bytes += len;
		lb_stats->packets++;
		u64_stats_update_end(&lb_stats->syncp);
	}

	return NETDEV_TX_OK;
}
Developer: AllenDou, Project: linux, Lines: 27


Example 10: ovs_dp_upcall

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
Developer: AiWinters, Project: linux, Lines: 36


Example 11: br_pass_frame_up

static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	vg = br_vlan_group_rcu(br);
	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
	 */
	if (!(brdev->flags & IFF_PROMISC) &&
	    !br_allowed_egress(vg, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	indev = skb->dev;
	skb->dev = brdev;
	skb = br_handle_vlan(br, vg, skb);
	if (!skb)
		return NET_RX_DROP;

	/* update the multicast stats if the packet is IGMP/MLD */
	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
			   BR_MCAST_DIR_TX);

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
		       dev_net(indev), NULL, skb, indev, NULL,
		       br_netif_receive_skb);
}
Developer: AK101111, Project: linux, Lines: 36


Example 12: ovs_ip_tunnel_rcv

/* derived from ip_tunnel_rcv(). */
void ovs_ip_tunnel_rcv(struct net_device *dev, struct sk_buff *skb,
		       struct metadata_dst *tun_dst)
{
	struct pcpu_sw_netstats *tstats;

	tstats = this_cpu_ptr((struct pcpu_sw_netstats __percpu *)dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, false);
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	ovs_skb_dst_set(skb, (struct dst_entry *)tun_dst);

#ifndef USE_UPSTREAM_TUNNEL
	netdev_port_receive(skb, &tun_dst->u.tun_info);
#else
	netif_rx(skb);
#endif
}
Developer: blp, Project: ovs-reviews, Lines: 25


Example 13: netvsc_recv_callback

/* * netvsc_recv_callback -  Callback when we receive a packet from the * "wire" on the specified device. */int netvsc_recv_callback(struct hv_device *device_obj,				struct hv_netvsc_packet *packet,				void **data,				struct ndis_tcp_ip_checksum_info *csum_info,				struct vmbus_channel *channel,				u16 vlan_tci){	struct net_device *net;	struct net_device_context *net_device_ctx;	struct sk_buff *skb;	struct netvsc_stats *rx_stats;	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;	if (!net || net->reg_state != NETREG_REGISTERED) {		return NVSP_STAT_FAIL;	}	net_device_ctx = netdev_priv(net);	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);	/* Allocate a skb - TODO direct I/O to pages? */	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);	if (unlikely(!skb)) {		++net->stats.rx_dropped;		return NVSP_STAT_FAIL;	}	/*	 * Copy to skb. This copy is needed here since the memory pointed by	 * hv_netvsc_packet cannot be deallocated	 */	memcpy(skb_put(skb, packet->total_data_buflen), *data,		packet->total_data_buflen);	skb->protocol = eth_type_trans(skb, net);	if (csum_info) {		/* We only look at the IP checksum here.		 * Should we be dropping the packet if checksum		 * failed? How do we deal with other checksums - TCP/UDP?		 */		if (csum_info->receive.ip_checksum_succeeded)			skb->ip_summed = CHECKSUM_UNNECESSARY;		else			skb->ip_summed = CHECKSUM_NONE;	}	if (vlan_tci & VLAN_TAG_PRESENT)		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),				       vlan_tci);	skb_record_rx_queue(skb, channel->			    offermsg.offer.sub_channel_index);	u64_stats_update_begin(&rx_stats->syncp);	rx_stats->packets++;	rx_stats->bytes += packet->total_data_buflen;	u64_stats_update_end(&rx_stats->syncp);	/*	 * Pass the skb back up. Network stack will deallocate the skb when it	 * is done.	 * TODO - use NAPI?	 */	netif_rx(skb);	return 0;}
Developer: andy-shev, Project: linux, Lines: 70


Example 14: xennet_start_xmit

//.........这里部分代码省略.........	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {		net_alert_ratelimited(			"xennet: skb->len = %u, too big for wire format/n",			skb->len);		goto drop;	}	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +		xennet_count_skb_frag_slots(skb);	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {		net_alert_ratelimited(			"xennet: skb rides the rocket: %d slots/n", slots);		goto drop;	}	spin_lock_irqsave(&np->tx_lock, flags);	if (unlikely(!netif_carrier_ok(dev) ||		     (slots > 1 && !xennet_can_sg(dev)) ||		     netif_needs_gso(skb, netif_skb_features(skb)))) {		spin_unlock_irqrestore(&np->tx_lock, flags);		goto drop;	}	i = np->tx.req_prod_pvt;	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);	np->tx_skbs[id].skb = skb;	tx = RING_GET_REQUEST(&np->tx, i);	tx->id   = id;	ref = gnttab_claim_grant_reference(&np->gref_tx_head);	BUG_ON((signed short)ref < 0);	mfn = virt_to_mfn(data);	gnttab_grant_foreign_access_ref(		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);	tx->gref = np->grant_tx_ref[id] = ref;	tx->offset = offset;	tx->size = len;	extra = NULL;	tx->flags = 0;	if (skb->ip_summed == CHECKSUM_PARTIAL)		/* local packet? */		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)		/* remote but checksummed. */		tx->flags |= XEN_NETTXF_data_validated;	if (skb_shinfo(skb)->gso_size) {		struct xen_netif_extra_info *gso;		gso = (struct xen_netif_extra_info *)			RING_GET_REQUEST(&np->tx, ++i);		if (extra)			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;		else			tx->flags |= XEN_NETTXF_extra_info;		gso->u.gso.size = skb_shinfo(skb)->gso_size;		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;		gso->u.gso.pad = 0;		gso->u.gso.features = 0;		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;		gso->flags = 0;		extra = gso;	}	np->tx.req_prod_pvt = i + 1;	xennet_make_frags(skb, dev, tx);	tx->size = skb->len;	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);	if (notify)		notify_remote_via_irq(np->netdev->irq);	u64_stats_update_begin(&stats->syncp);	stats->tx_bytes += skb->len;	stats->tx_packets++;	u64_stats_update_end(&stats->syncp);	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */	xennet_tx_buf_gc(dev);	if (!netfront_tx_slot_available(np))		netif_stop_queue(dev);	spin_unlock_irqrestore(&np->tx_lock, flags);	return NETDEV_TX_OK; drop:	dev->stats.tx_dropped++;	dev_kfree_skb(skb);	return NETDEV_TX_OK;}
Developer: jay-caoj, Project: linux-3.9.6, Lines: 101


Example 15: netvsc_recv_callback

/* * netvsc_recv_callback -  Callback when we receive a packet from the * "wire" on the specified device. */int netvsc_recv_callback(struct hv_device *device_obj,				struct hv_netvsc_packet *packet,				void **data,				struct ndis_tcp_ip_checksum_info *csum_info,				struct vmbus_channel *channel,				u16 vlan_tci){	struct net_device *net = hv_get_drvdata(device_obj);	struct net_device_context *net_device_ctx = netdev_priv(net);	struct net_device *vf_netdev;	struct sk_buff *skb;	struct netvsc_stats *rx_stats;	if (net->reg_state != NETREG_REGISTERED)		return NVSP_STAT_FAIL;	/*	 * If necessary, inject this packet into the VF interface.	 * On Hyper-V, multicast and brodcast packets are only delivered	 * to the synthetic interface (after subjecting these to	 * policy filters on the host). Deliver these via the VF	 * interface in the guest.	 */	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);	if (vf_netdev && (vf_netdev->flags & IFF_UP))		net = vf_netdev;	/* Allocate a skb - TODO direct I/O to pages? */	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);	if (unlikely(!skb)) {		++net->stats.rx_dropped;		return NVSP_STAT_FAIL;	}	if (net != vf_netdev)		skb_record_rx_queue(skb,				    channel->offermsg.offer.sub_channel_index);	/*	 * Even if injecting the packet, record the statistics	 * on the synthetic device because modifying the VF device	 * statistics will not work correctly.	 */	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);	u64_stats_update_begin(&rx_stats->syncp);	rx_stats->packets++;	rx_stats->bytes += packet->total_data_buflen;	if (skb->pkt_type == PACKET_BROADCAST)		++rx_stats->broadcast;	else if (skb->pkt_type == PACKET_MULTICAST)		++rx_stats->multicast;	u64_stats_update_end(&rx_stats->syncp);	/*	 * Pass the skb back up. Network stack will deallocate the skb when it	 * is done.	 * TODO - use NAPI?	 */	netif_rx(skb);	return 0;}
Developer: mansr, Project: linux-tangox, Lines: 67


Example 16: netvsc_start_xmit

//.........这里部分代码省略.........		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>				VLAN_PRIO_SHIFT;	}	net_trans_info = get_net_transport_info(skb, &hdr_offset);	/*	 * Setup the sendside checksum offload only if this is not a	 * GSO packet.	 */	if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {		struct ndis_tcp_lso_info *lso_info;		rndis_msg_size += NDIS_LSO_PPI_SIZE;		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,				    TCP_LARGESEND_PKTINFO);		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +							ppi->ppi_offset);		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;		if (net_trans_info & (INFO_IPV4 << 16)) {			lso_info->lso_v2_transmit.ip_version =				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;			ip_hdr(skb)->tot_len = 0;			ip_hdr(skb)->check = 0;			tcp_hdr(skb)->check =				~csum_tcpudp_magic(ip_hdr(skb)->saddr,						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);		} else {			lso_info->lso_v2_transmit.ip_version =				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;			ipv6_hdr(skb)->payload_len = 0;			tcp_hdr(skb)->check =				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);		}		lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {		if (net_trans_info & INFO_TCP) {			rndis_msg_size += NDIS_CSUM_PPI_SIZE;			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,					    TCPIP_CHKSUM_PKTINFO);			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +									 ppi->ppi_offset);			if (net_trans_info & (INFO_IPV4 << 16))				csum_info->transmit.is_ipv4 = 1;			else				csum_info->transmit.is_ipv6 = 1;			csum_info->transmit.tcp_checksum = 1;			csum_info->transmit.tcp_header_offset = hdr_offset;		} else {			/* UDP checksum (and other) offload is not supported. */			if (skb_checksum_help(skb))				goto drop;		}	}	/* Start filling in the page buffers with the rndis hdr */	rndis_msg->msg_len += rndis_msg_size;	packet->total_data_buflen = rndis_msg->msg_len;	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,					       skb, packet, &pb);	/* timestamp packet in software */	skb_tx_timestamp(skb);	ret = netvsc_send(net_device_ctx->device_ctx, packet,			  rndis_msg, &pb, skb);	if (likely(ret == 0)) {		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);		u64_stats_update_begin(&tx_stats->syncp);		tx_stats->packets++;		tx_stats->bytes += skb_length;		u64_stats_update_end(&tx_stats->syncp);		return NETDEV_TX_OK;	}	if (ret == -EAGAIN) {		++net_device_ctx->eth_stats.tx_busy;		return NETDEV_TX_BUSY;	}	if (ret == -ENOSPC)		++net_device_ctx->eth_stats.tx_no_space;drop:	dev_kfree_skb_any(skb);	net->stats.tx_dropped++;	return NETDEV_TX_OK;no_memory:	++net_device_ctx->eth_stats.tx_no_memory;	goto drop;}
Developer: mansr, Project: linux-tangox, Lines: 101


Example 17: ixgbe_clean_rx_irq_zc

//.........这里部分代码省略.........			cleaned_count = 0;		}		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);		size = le16_to_cpu(rx_desc->wb.upper.length);		if (!size)			break;		/* This memory barrier is needed to keep us from reading		 * any other fields out of the rx_desc until we know the		 * descriptor has been written back		 */		dma_rmb();		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);		if (unlikely(!ixgbe_test_staterr(rx_desc,						 IXGBE_RXD_STAT_EOP))) {			struct ixgbe_rx_buffer *next_bi;			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);			ixgbe_inc_ntc(rx_ring);			next_bi =			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];			next_bi->skb = ERR_PTR(-EINVAL);			continue;		}		if (unlikely(bi->skb)) {			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);			ixgbe_inc_ntc(rx_ring);			continue;		}		xdp.data = bi->addr;		xdp.data_meta = xdp.data;		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;		xdp.data_end = xdp.data + size;		xdp.handle = bi->handle;		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);		if (xdp_res) {			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {				xdp_xmit |= xdp_res;				bi->addr = NULL;				bi->skb = NULL;			} else {				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);			}			total_rx_packets++;			total_rx_bytes += size;			cleaned_count++;			ixgbe_inc_ntc(rx_ring);			continue;		}		/* XDP_PASS path */		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);		if (!skb) {			rx_ring->rx_stats.alloc_rx_buff_failed++;			break;		}		cleaned_count++;		ixgbe_inc_ntc(rx_ring);		if (eth_skb_pad(skb))			continue;		total_rx_bytes += skb->len;		total_rx_packets++;		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);		ixgbe_rx_skb(q_vector, skb);	}	if (xdp_xmit & IXGBE_XDP_REDIR)		xdp_do_flush_map();	if (xdp_xmit & IXGBE_XDP_TX) {		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];		/* Force memory writes to complete before letting h/w		 * know there are new descriptors to fetch.		 */		wmb();		writel(ring->next_to_use, ring->tail);	}	u64_stats_update_begin(&rx_ring->syncp);	rx_ring->stats.packets += total_rx_packets;	rx_ring->stats.bytes += total_rx_bytes;	u64_stats_update_end(&rx_ring->syncp);	q_vector->rx.total_packets += total_rx_packets;	q_vector->rx.total_bytes += total_rx_bytes;	return failure ? budget : (int)total_rx_packets;}
Developer: AlexShiLucky, Project: linux, Lines: 101


Example 18: ixgbe_clean_xdp_tx_irq

bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,			    struct ixgbe_ring *tx_ring, int napi_budget){	unsigned int total_packets = 0, total_bytes = 0;	u32 i = tx_ring->next_to_clean, xsk_frames = 0;	unsigned int budget = q_vector->tx.work_limit;	struct xdp_umem *umem = tx_ring->xsk_umem;	union ixgbe_adv_tx_desc *tx_desc;	struct ixgbe_tx_buffer *tx_bi;	bool xmit_done;	tx_bi = &tx_ring->tx_buffer_info[i];	tx_desc = IXGBE_TX_DESC(tx_ring, i);	i -= tx_ring->count;	do {		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))			break;		total_bytes += tx_bi->bytecount;		total_packets += tx_bi->gso_segs;		if (tx_bi->xdpf)			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);		else			xsk_frames++;		tx_bi->xdpf = NULL;		total_bytes += tx_bi->bytecount;		tx_bi++;		tx_desc++;		i++;		if (unlikely(!i)) {			i -= tx_ring->count;			tx_bi = tx_ring->tx_buffer_info;			tx_desc = IXGBE_TX_DESC(tx_ring, 0);		}		/* issue prefetch for next Tx descriptor */		prefetch(tx_desc);		/* update budget accounting */		budget--;	} while (likely(budget));	i += tx_ring->count;	tx_ring->next_to_clean = i;	u64_stats_update_begin(&tx_ring->syncp);	tx_ring->stats.bytes += total_bytes;	tx_ring->stats.packets += total_packets;	u64_stats_update_end(&tx_ring->syncp);	q_vector->tx.total_bytes += total_bytes;	q_vector->tx.total_packets += total_packets;	if (xsk_frames)		xsk_umem_complete_tx(umem, xsk_frames);	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);	return budget > 0 && xmit_done;}
Developer: AlexShiLucky, Project: linux, Lines: 62


Example 19: vrf_process_v4_outbound

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,					   struct net_device *vrf_dev){	struct iphdr *ip4h = ip_hdr(skb);	int ret = NET_XMIT_DROP;	struct flowi4 fl4 = {		/* needed to match OIF rule */		.flowi4_oif = vrf_dev->ifindex,		.flowi4_iif = LOOPBACK_IFINDEX,		.flowi4_tos = RT_TOS(ip4h->tos),		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,		.flowi4_proto = ip4h->protocol,		.daddr = ip4h->daddr,		.saddr = ip4h->saddr,	};	struct net *net = dev_net(vrf_dev);	struct rtable *rt;	rt = ip_route_output_flow(net, &fl4, NULL);	if (IS_ERR(rt))		goto err;	skb_dst_drop(skb);	/* if dst.dev is loopback or the VRF device again this is locally	 * originated traffic destined to a local address. Short circuit	 * to Rx path using our local dst	 */	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {		struct net_vrf *vrf = netdev_priv(vrf_dev);		struct rtable *rth_local;		struct dst_entry *dst = NULL;		ip_rt_put(rt);		rcu_read_lock();		rth_local = rcu_dereference(vrf->rth_local);		if (likely(rth_local)) {			dst = &rth_local->dst;			dst_hold(dst);		}		rcu_read_unlock();		if (unlikely(!dst))			goto err;		return vrf_local_xmit(skb, vrf_dev, dst);	}	skb_dst_set(skb, &rt->dst);	/* strip the ethernet header added for pass through VRF device */	__skb_pull(skb, skb_network_offset(skb));	if (!ip4h->saddr) {		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,					       RT_SCOPE_LINK);	}	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);	if (unlikely(net_xmit_eval(ret)))		vrf_dev->stats.tx_errors++;	else		ret = NET_XMIT_SUCCESS;out:	return ret;err:	vrf_tx_error(vrf_dev, skb);	goto out;}static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev){	switch (skb->protocol) {	case htons(ETH_P_IP):		return vrf_process_v4_outbound(skb, dev);	case htons(ETH_P_IPV6):		return vrf_process_v6_outbound(skb, dev);	default:		vrf_tx_error(dev, skb);		return NET_XMIT_DROP;	}}static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev){	netdev_tx_t ret = is_ip_tx_frame(skb, dev);	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);		u64_stats_update_begin(&dstats->syncp);		dstats->tx_pkts++;		dstats->tx_bytes += skb->len;		u64_stats_update_end(&dstats->syncp);	} else {		this_cpu_inc(dev->dstats->tx_drps);//.........这里部分代码省略.........
Developer: AshishNamdev, Project: linux, Lines: 101


Example 20: geneve_rx

/* geneve receive/decap routine */static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb){	struct genevehdr *gnvh = geneve_hdr(skb);	struct metadata_dst *tun_dst;	struct geneve_dev *geneve = NULL;#ifdef HAVE_DEV_TSTATS	struct pcpu_sw_netstats *stats;#endif	struct iphdr *iph;	u8 *vni;	__be32 addr;	int err;	union {		struct metadata_dst dst;		char buf[sizeof(struct metadata_dst) + 256];	} buf;	iph = ip_hdr(skb); /* outer IP header... */	if (gs->collect_md) {		static u8 zero_vni[3];		vni = zero_vni;		addr = 0;	} else {		vni = gnvh->vni;		addr = iph->saddr;	}	geneve = geneve_lookup(gs, addr, vni);	if (!geneve)		goto drop;	if (ip_tunnel_collect_metadata() || gs->collect_md) {		__be16 flags;		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |			(gnvh->oam ? TUNNEL_OAM : 0) |			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);		tun_dst = &buf.dst;		ovs_udp_tun_rx_dst(&tun_dst->u.tun_info, skb, AF_INET, flags,				   vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4);		/* Update tunnel dst according to Geneve options. */		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,					gnvh->options, gnvh->opt_len * 4);	} else {		/* Drop packets w/ critical options,		 * since we don't support any...		 */		tun_dst = NULL;		if (gnvh->critical)			goto drop;	}	skb_reset_mac_header(skb);	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));	skb->protocol = eth_type_trans(skb, geneve->dev);	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);	if (tun_dst)		ovs_skb_dst_set(skb, &tun_dst->dst);	else		goto drop;	/* Ignore packet loops (and multicast echo) */	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))		goto drop;	skb_reset_network_header(skb);	err = IP_ECN_decapsulate(iph, skb);	if (unlikely(err)) {		if (err > 1) {			++geneve->dev->stats.rx_frame_errors;			++geneve->dev->stats.rx_errors;			goto drop;		}	}#ifdef HAVE_DEV_TSTATS	stats = this_cpu_ptr((struct pcpu_sw_netstats __percpu *)geneve->dev->tstats);	u64_stats_update_begin(&stats->syncp);	stats->rx_packets++;	stats->rx_bytes += skb->len;	u64_stats_update_end(&stats->syncp);#endif	netdev_port_receive(skb, &tun_dst->u.tun_info);	return;drop:	/* Consume bad packet */	kfree_skb(skb);}
Developer: AlexanderFroemmgen, Project: ovs, Lines: 94


Example 21: vlan_do_receive

bool vlan_do_receive(struct sk_buff **skbp, bool last_handler){	struct sk_buff *skb = *skbp;	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;	struct net_device *vlan_dev;	struct vlan_pcpu_stats *rx_stats;	vlan_dev = vlan_find_dev(skb->dev, vlan_id);	if (!vlan_dev) {		/*                                                                                          */		if (vlan_id && last_handler)			skb->pkt_type = PACKET_OTHERHOST;		return false;	}	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);	if (unlikely(!skb))		return false;	skb->dev = vlan_dev;	if (skb->pkt_type == PACKET_OTHERHOST) {		/*                                                                                                                                                                        */		if (!compare_ether_addr(eth_hdr(skb)->h_dest,					vlan_dev->dev_addr))			skb->pkt_type = PACKET_HOST;	}	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {		unsigned int offset = skb->data - skb_mac_header(skb);		/*                                                                                                                                                         */		skb_push(skb, offset);		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);		if (!skb)			return false;		skb_pull(skb, offset + VLAN_HLEN);		skb_reset_mac_len(skb);	}	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);	skb->vlan_tci = 0;	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);	u64_stats_update_begin(&rx_stats->syncp);	rx_stats->rx_packets++;	rx_stats->rx_bytes += skb->len;	if (skb->pkt_type == PACKET_MULTICAST)		rx_stats->rx_multicast++;	u64_stats_update_end(&rx_stats->syncp);	return true;}
Developer: romanbb, Project: android_kernel_lge_d851, Lines: 61


Example 22: netvsc_start_xmit

//.........这里部分代码省略.........		goto do_send;	rndis_msg_size += NDIS_CSUM_PPI_SIZE;	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,			    TCPIP_CHKSUM_PKTINFO);	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +			ppi->ppi_offset);	if (net_trans_info & (INFO_IPV4 << 16))		csum_info->transmit.is_ipv4 = 1;	else		csum_info->transmit.is_ipv6 = 1;	if (net_trans_info & INFO_TCP) {		csum_info->transmit.tcp_checksum = 1;		csum_info->transmit.tcp_header_offset = hdr_offset;	} else if (net_trans_info & INFO_UDP) {		/* UDP checksum offload is not supported on ws2008r2.		 * Furthermore, on ws2012 and ws2012r2, there are some		 * issues with udp checksum offload from Linux guests.		 * (these are host issues).		 * For now compute the checksum here.		 */		struct udphdr *uh;		u16 udp_len;		ret = skb_cow_head(skb, 0);		if (ret)			goto drop;		uh = udp_hdr(skb);		udp_len = ntohs(uh->len);		uh->check = 0;		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,					      ip_hdr(skb)->daddr,					      udp_len, IPPROTO_UDP,					      csum_partial(uh, udp_len, 0));		if (uh->check == 0)			uh->check = CSUM_MANGLED_0;		csum_info->transmit.udp_checksum = 0;	}	goto do_send;do_lso:	rndis_msg_size += NDIS_LSO_PPI_SIZE;	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,			    TCP_LARGESEND_PKTINFO);	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +			ppi->ppi_offset);	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;	if (net_trans_info & (INFO_IPV4 << 16)) {		lso_info->lso_v2_transmit.ip_version =			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;		ip_hdr(skb)->tot_len = 0;		ip_hdr(skb)->check = 0;		tcp_hdr(skb)->check =		~csum_tcpudp_magic(ip_hdr(skb)->saddr,				   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);	} else {		lso_info->lso_v2_transmit.ip_version =			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;		ipv6_hdr(skb)->payload_len = 0;		tcp_hdr(skb)->check =		~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,				&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);	}	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;do_send:	/* Start filling in the page buffers with the rndis hdr */	rndis_msg->msg_len += rndis_msg_size;	packet->total_data_buflen = rndis_msg->msg_len;	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,					       skb, packet, &pb);	/* timestamp packet in software */	skb_tx_timestamp(skb);	ret = netvsc_send(net_device_ctx->device_ctx, packet,			  rndis_msg, &pb, skb);drop:	if (ret == 0) {		u64_stats_update_begin(&tx_stats->syncp);		tx_stats->packets++;		tx_stats->bytes += skb_length;		u64_stats_update_end(&tx_stats->syncp);	} else {		if (ret != -EAGAIN) {			dev_kfree_skb_any(skb);			net->stats.tx_dropped++;		}	}	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;}
Developer: 513855417, Project: linux, Lines: 101


Example 23: netvsc_recv_callback

/* * netvsc_recv_callback -  Callback when we receive a packet from the * "wire" on the specified device. */int netvsc_recv_callback(struct hv_device *device_obj,				struct hv_netvsc_packet *packet,				void **data,				struct ndis_tcp_ip_checksum_info *csum_info,				struct vmbus_channel *channel,				u16 vlan_tci){	struct net_device *net = hv_get_drvdata(device_obj);	struct net_device_context *net_device_ctx = netdev_priv(net);	struct sk_buff *skb;	struct sk_buff *vf_skb;	struct netvsc_stats *rx_stats;	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;	u32 bytes_recvd = packet->total_data_buflen;	int ret = 0;	if (!net || net->reg_state != NETREG_REGISTERED)		return NVSP_STAT_FAIL;	if (READ_ONCE(netvsc_dev->vf_inject)) {		atomic_inc(&netvsc_dev->vf_use_cnt);		if (!READ_ONCE(netvsc_dev->vf_inject)) {			/*			 * We raced; just move on.			 */			atomic_dec(&netvsc_dev->vf_use_cnt);			goto vf_injection_done;		}		/*		 * Inject this packet into the VF inerface.		 * On Hyper-V, multicast and brodcast packets		 * are only delivered on the synthetic interface		 * (after subjecting these to policy filters on		 * the host). Deliver these via the VF interface		 * in the guest.		 */		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,					       csum_info, *data, vlan_tci);		if (vf_skb != NULL) {			++netvsc_dev->vf_netdev->stats.rx_packets;			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;			netif_receive_skb(vf_skb);		} else {			++net->stats.rx_dropped;			ret = NVSP_STAT_FAIL;		}		atomic_dec(&netvsc_dev->vf_use_cnt);		return ret;	}vf_injection_done:	net_device_ctx = netdev_priv(net);	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);	/* Allocate a skb - TODO direct I/O to pages? */	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);	if (unlikely(!skb)) {		++net->stats.rx_dropped;		return NVSP_STAT_FAIL;	}	skb_record_rx_queue(skb, channel->			    offermsg.offer.sub_channel_index);	u64_stats_update_begin(&rx_stats->syncp);	rx_stats->packets++;	rx_stats->bytes += packet->total_data_buflen;	u64_stats_update_end(&rx_stats->syncp);	/*	 * Pass the skb back up. Network stack will deallocate the skb when it	 * is done.	 * TODO - use NAPI?	 */	netif_rx(skb);	return 0;}
Developer: 513855417, Project: linux, Lines: 82


Example 24: vlan_skb_recv

/* *	Determine the packet's protocol ID. The rule here is that we *	assume 802.3 if the type field is short enough to be a length. *	This is normal practice and works for any 'now in use' protocol. * *  Also, at this point we assume that we ARE dealing exclusively with *  VLAN packets, or packets that should be made into VLAN packets based *  on a default VLAN ID. * *  NOTE:  Should be similar to ethernet/eth.c. * *  SANITY NOTE:  This method is called when a packet is moving up the stack *                towards userland.  To get here, it would have already passed *                through the ethernet/eth.c eth_type_trans() method. *  SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be *                 stored UNALIGNED in the memory.  RISC systems don't like *                 such cases very much... *  SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be *  		    aligned, so there doesn't need to be any of the unaligned *  		    stuff.  It has been commented out now...  --Ben * */int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,		  struct packet_type *ptype, struct net_device *orig_dev){	struct vlan_hdr *vhdr;	struct vlan_pcpu_stats *rx_stats;	struct net_device *vlan_dev;	u16 vlan_id;	u16 vlan_tci;	skb = skb_share_check(skb, GFP_ATOMIC);	if (skb == NULL)		goto err_free;	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))		goto err_free;	vhdr = (struct vlan_hdr *)skb->data;	vlan_tci = ntohs(vhdr->h_vlan_TCI);	vlan_id = vlan_tci & VLAN_VID_MASK;	rcu_read_lock();	vlan_dev = vlan_find_dev(dev, vlan_id);	/* If the VLAN device is defined, we use it.	 * If not, and the VID is 0, it is a 802.1p packet (not	 * really a VLAN), so we will just netif_rx it later to the	 * original interface, but with the skb->proto set to the	 * wrapped proto: we do nothing here.	 */	if (!vlan_dev) {		if (vlan_id) {			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s/n",				 __func__, vlan_id, dev->name);			goto err_unlock;		}		rx_stats = NULL;	} else {		skb->dev = vlan_dev;		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);		u64_stats_update_begin(&rx_stats->syncp);		rx_stats->rx_packets++;		rx_stats->rx_bytes += skb->len;		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);		pr_debug("%s: priority: %u for TCI: %hu/n",			 __func__, skb->priority, vlan_tci);		switch (skb->pkt_type) {		case PACKET_BROADCAST:			/* Yeah, stats collect these together.. */			/* stats->broadcast ++; // no such counter :-( */			break;		case PACKET_MULTICAST:			rx_stats->rx_multicast++;			break;		case PACKET_OTHERHOST:			/* Our lower layer thinks this is not local, let's make			 * sure.			 * This allows the VLAN to have a different MAC than the			 * underlying device, and still route correctly.			 */			if (!compare_ether_addr(eth_hdr(skb)->h_dest,						skb->dev->dev_addr))				skb->pkt_type = PACKET_HOST;			break;		default:			break;		}		u64_stats_update_end(&rx_stats->syncp);	}	skb_pull_rcsum(skb, VLAN_HLEN);//.........这里部分代码省略.........
Developer: ANFS, Project: ANFS-kernel, Lines: 101


Example 25: vlan_do_receive

bool vlan_do_receive(struct sk_buff **skbp, bool last_handler){	struct sk_buff *skb = *skbp;	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;	struct net_device *vlan_dev;	struct vlan_pcpu_stats *rx_stats;	vlan_dev = vlan_find_dev(skb->dev, vlan_id);	if (!vlan_dev) {		/* Only the last call to vlan_do_receive() should change		 * pkt_type to PACKET_OTHERHOST		 */		if (vlan_id && last_handler)			skb->pkt_type = PACKET_OTHERHOST;		return false;	}	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);	if (unlikely(!skb))		return false;	skb->dev = vlan_dev;	if (skb->pkt_type == PACKET_OTHERHOST) {		/* Our lower layer thinks this is not local, let's make sure.		 * This allows the VLAN to have a different MAC than the		 * underlying device, and still route correctly. */		if (!compare_ether_addr(eth_hdr(skb)->h_dest,					vlan_dev->dev_addr))			skb->pkt_type = PACKET_HOST;	}	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {		unsigned int offset = skb->data - skb_mac_header(skb);		/*		 * vlan_insert_tag expect skb->data pointing to mac header.		 * So change skb->data before calling it and change back to		 * original position later		 */		skb_push(skb, offset);		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);		if (!skb)			return false;		skb_pull(skb, offset + VLAN_HLEN);		skb_reset_mac_len(skb);	}	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);	skb->vlan_tci = 0;	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);	u64_stats_update_begin(&rx_stats->syncp);	rx_stats->rx_packets++;	rx_stats->rx_bytes += skb->len;	if (skb->pkt_type == PACKET_MULTICAST)		rx_stats->rx_multicast++;	u64_stats_update_end(&rx_stats->syncp);	return true;}
Developer: davidmueller13, Project: davidskernel_lt03lte_tw_5.1.1, Lines: 61


Example 26: ip_tunnel_rcv

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,		  const struct tnl_ptk_info *tpi, bool log_ecn_error){	struct pcpu_tstats *tstats;	const struct iphdr *iph = ip_hdr(skb);	int err;#ifdef CONFIG_NET_IPGRE_BROADCAST	if (ipv4_is_multicast(iph->daddr)) {		/* Looped back packet, drop it! */		if (rt_is_output_route(skb_rtable(skb)))			goto drop;		tunnel->dev->stats.multicast++;		skb->pkt_type = PACKET_BROADCAST;	}#endif	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {		tunnel->dev->stats.rx_crc_errors++;		tunnel->dev->stats.rx_errors++;		goto drop;	}	if (tunnel->parms.i_flags&TUNNEL_SEQ) {		if (!(tpi->flags&TUNNEL_SEQ) ||		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {			tunnel->dev->stats.rx_fifo_errors++;			tunnel->dev->stats.rx_errors++;			goto drop;		}		tunnel->i_seqno = ntohl(tpi->seq) + 1;	}	err = IP_ECN_decapsulate(iph, skb);	if (unlikely(err)) {		if (log_ecn_error)			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x/n",					&iph->saddr, iph->tos);		if (err > 1) {			++tunnel->dev->stats.rx_frame_errors;			++tunnel->dev->stats.rx_errors;			goto drop;		}	}	tstats = this_cpu_ptr(tunnel->dev->tstats);	u64_stats_update_begin(&tstats->syncp);	tstats->rx_packets++;	tstats->rx_bytes += skb->len;	u64_stats_update_end(&tstats->syncp);	if (tunnel->net != dev_net(tunnel->dev))		skb_scrub_packet(skb);	if (tunnel->dev->type == ARPHRD_ETHER) {		skb->protocol = eth_type_trans(skb, tunnel->dev);		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);	} else {		skb->dev = tunnel->dev;	}	gro_cells_receive(&tunnel->gro_cells, skb);	return 0;drop:	kfree_skb(skb);	return 0;}
Developer: gbtian, Project: mpip, Lines: 68


Example 27: ri_tasklet

static void ri_tasklet(unsigned long dev){	struct net_device *_dev = (struct net_device *)dev;	struct ifb_private *dp = netdev_priv(_dev);	struct netdev_queue *txq;	struct sk_buff *skb;	txq = netdev_get_tx_queue(_dev, 0);	if ((skb = skb_peek(&dp->tq)) == NULL) {		if (__netif_tx_trylock(txq)) {			skb_queue_splice_tail_init(&dp->rq, &dp->tq);			__netif_tx_unlock(txq);		} else {			/* reschedule */			goto resched;		}	}	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {		u32 from = G_TC_FROM(skb->tc_verd);		skb->tc_verd = 0;		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);		u64_stats_update_begin(&dp->tsync);		dp->tx_packets++;		dp->tx_bytes += skb->len;		u64_stats_update_end(&dp->tsync);		rcu_read_lock();		skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);		if (!skb->dev) {			rcu_read_unlock();			dev_kfree_skb(skb);			_dev->stats.tx_dropped++;			if (skb_queue_len(&dp->tq) != 0)				goto resched;			break;		}		rcu_read_unlock();		skb->skb_iif = _dev->ifindex;		if (from & AT_EGRESS) {			dev_queue_xmit(skb);		} else if (from & AT_INGRESS) {			skb_pull(skb, skb->dev->hard_header_len);			netif_receive_skb(skb);		} else			BUG();	}	if (__netif_tx_trylock(txq)) {		if ((skb = skb_peek(&dp->rq)) == NULL) {			dp->tasklet_pending = 0;			if (netif_queue_stopped(_dev))				netif_wake_queue(_dev);		} else {			__netif_tx_unlock(txq);			goto resched;		}		__netif_tx_unlock(txq);	} else {resched:		dp->tasklet_pending = 1;		tasklet_schedule(&dp->ifb_tasklet);	}}
Developer: AiWinters, Project: linux, Lines: 68



Note: the u64_stats_update_begin examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not republish this article without permission.

