__u8 meta_len;                          // 元数据长度
__u8 nr_frags;                          // 分片数量
__u8 tx_flags;                          // 传输标志
unsigned short gso_size;                // 大分组分片(GSO)大小
unsigned short gso_segs;                // GSO 分段数
struct sk_buff *frag_list;              // 分片列表指针
struct skb_shared_hwtstamps hwtstamps;  // 硬件时间戳信息
unsigned int...
int netif_receive_skb(struct sk_buff *skb) { net_timestamp_check(netdev_tstamp_prequeue, skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu, ret; rcu...
当缓冲区沿网络协议栈向下传递(发送方向)时,每个协议都会把 skb->data 指针向头部方向前移,将自己的报头拷贝进这段预留空间,然后相应更新 skb->len。 skb_push 在缓冲区开端添加一个数据块,skb_put 在尾端添加一个数据块;这两个函数都没有真正将数据添加进缓冲区,只是简单地移动指向头和尾的指针,需要其他函数将数据复制进来。skb_pull 将 data 指针后移,将一...
*/
unsigned long dev_scratch;
};
};
struct list_head list;          // 指向头节点
};
union {
    struct sock *sk;            // 报文所属的套接字
    int ip_defrag_offset;
};
union {
    ktime_t tstamp;             // 报文时间戳
    u64 skb_mstamp_ns;          /* earliest departure time */
};
__u16 transport_header;         // 指向传输层协议首部的起始。
__u16 network_h...
__skb_tstamp_tx(skb,NULL, skb->sk, SCM_TSTAMP_SCHED);/* Disable soft irqs for various locks below. Also * stops preemption for RCU. */rcu_read_lock_bh(); skb_update_prio(skb);/* If device/qdisc don't need skb->dst, release it right now while ...
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) { struct net_device *dev = skb->dev; struct netdev_queue *txq; struct Qdisc *q; int rc = -ENOMEM; skb_reset_mac_header(skb); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) ...
int netif_receive_skb(struct sk_buff *skb) { net_timestamp_check(netdev_tstamp_prequeue, skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; ...
数据包先统一经过一个安装在 clsact Qdisc 出口方向上的 eBPF filter。这个 eBPF filter 程序包含着主要的限速逻辑;它会根据某一个数据包所属流量类别的带宽来计算这个数据包的预计发送时间,并给它盖一个时间戳 (skb->tstamp); 随后,数据包被分发到设备的各个硬件队列,且每个硬件队列都有一个自己的 fq Qdisc; ...
int netif_receive_skb(struct sk_buff *skb) { net_timestamp_check(netdev_tstamp_prequeue, skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; ...
Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns). Cleanup our debris for IP stacks: memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), sizeof(struct inet6_skb_parm))); err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); ...