OpenWrt – Blame information for rev 4
All lines below were last modified in rev 4 (author: office).

--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -738,6 +738,7 @@ struct sk_buff {
 #endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
+	__u8			fast_forwarded:1;
	__u8			remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2947,8 +2947,14 @@ static int xmit_one(struct sk_buff *skb,
	unsigned int len;
	int rc;

-	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
-		dev_queue_xmit_nit(skb, dev);
+	/*
+	 * If this skb has been fast forwarded then we don't want it to
+	 * go to any taps (by definition we're trying to bypass them).
+	 */
+	if (!skb->fast_forwarded) {
+		if (!list_empty(&ptype_all))
+			dev_queue_xmit_nit(skb, dev);
+	}

 #ifdef CONFIG_ETHERNET_PACKET_MANGLE
	if (!dev->eth_mangle_tx ||
@@ -4137,6 +4143,9 @@ static inline int nf_ingress(struct sk_b
	return 0;
 }

+int (*fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(fast_nat_recv);
+
 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
	struct packet_type *ptype, *pt_prev;
@@ -4146,6 +4155,8 @@ static int __netif_receive_skb_core(stru
	int ret = NET_RX_DROP;
	__be16 type;

+	int (*fast_recv)(struct sk_buff *skb);
+
	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);
@@ -4171,6 +4182,12 @@ another_round:
			goto out;
	}

+	fast_recv = rcu_dereference(fast_nat_recv);
+	if (fast_recv && fast_recv(skb)) {
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+
 #ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
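
Taken together, the two net/core/dev.c hunks form the fast-path plumbing: __netif_receive_skb_core() offers every received frame to whatever handler is installed in the RCU-protected fast_nat_recv hook, and a non-zero return means the frame was consumed; when the handler later re-injects an accelerated frame for transmit it is expected to set skb->fast_forwarded, so xmit_one() skips dev_queue_xmit_nit() and the taps it is deliberately bypassing. The sketch below shows how an out-of-tree offload module might attach to this hook. It is illustrative only: the myfast_* names (including the stubbed flow lookup) are hypothetical, and only fast_nat_recv, skb->fast_forwarded and the core networking calls come from this patch or the mainline kernel.

    /*
     * Illustrative sketch, not part of the patch: a minimal module that
     * installs a fast_nat_recv handler. myfast_lookup() stands in for a
     * real flow-table lookup and always misses here.
     */
    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/rcupdate.h>

    extern int (*fast_nat_recv)(struct sk_buff *skb) __rcu;

    /* A real engine would return the egress device of an established flow. */
    static struct net_device *myfast_lookup(struct sk_buff *skb)
    {
            return NULL;
    }

    /* Return non-zero if the skb was consumed by the fast path. */
    static int myfast_recv(struct sk_buff *skb)
    {
            struct net_device *out_dev = myfast_lookup(skb);

            if (!out_dev)
                    return 0;               /* not ours: take the normal slow path */

            skb->fast_forwarded = 1;        /* xmit_one() will skip the taps */
            skb->dev = out_dev;
            dev_queue_xmit(skb);
            return 1;                       /* core reports NET_RX_SUCCESS */
    }

    static int __init myfast_init(void)
    {
            rcu_assign_pointer(fast_nat_recv, myfast_recv);
            return 0;
    }

    static void __exit myfast_exit(void)
    {
            RCU_INIT_POINTER(fast_nat_recv, NULL);
            synchronize_net();              /* let in-flight readers finish */
    }

    module_init(myfast_init);
    module_exit(myfast_exit);
    MODULE_LICENSE("GPL");

The unload path matters: the hook must be cleared and readers drained (synchronize_net()) before the handler code can go away, because __netif_receive_skb_core() only holds the RCU read lock while calling through fast_recv.
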
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -35,6 +35,7 @@

 /* Do not check the TCP window for incoming packets */
 static int nf_ct_tcp_no_window_check __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);

 /* "Be conservative in what you do,
     be liberal in what you accept from others."
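
Exporting nf_ct_tcp_no_window_check gives a GPL module outside this file a way to ask whether conntrack's TCP window tracking is disabled, which an acceleration engine would typically want to know before routing TCP flows around netfilter, since packets that bypass conntrack would otherwise leave its tracked window state stale. A minimal, purely illustrative check (the consuming function is hypothetical):

    #include <linux/types.h>

    /* Flag exported above; non-zero means conntrack skips TCP window checks. */
    extern int nf_ct_tcp_no_window_check;

    static bool my_engine_may_bypass_tcp(void)
    {
            return nf_ct_tcp_no_window_check != 0;
    }
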
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -655,3 +655,26 @@ void br_port_flags_change(struct net_bri
	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);
 }
+/* Update bridge statistics for bridge packets processed by offload engines */
+void br_dev_update_stats(struct net_device *dev, struct rtnl_link_stats64 *nlstats)
+{
+	struct net_bridge *br;
+	struct pcpu_sw_netstats *stats;
+
+	/*
+	 * Is this a bridge?
+	 */
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		return;
+
+	br = netdev_priv(dev);
+	stats = per_cpu_ptr(br->stats, 0);
+
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets += nlstats->rx_packets;
+	stats->rx_bytes += nlstats->rx_bytes;
+	stats->tx_packets += nlstats->tx_packets;
+	stats->tx_bytes += nlstats->tx_bytes;
+	u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
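
Frames that an offload engine forwards on behalf of a bridge never pass through the bridge's own receive and transmit paths, so the bridge's byte and packet counters would stop moving; br_dev_update_stats() lets the engine credit those frames back to the bridge device. It folds everything into CPU 0's per-CPU counter set, which is enough because the bridge sums the counters of every CPU when reporting statistics. A hedged usage sketch follows; the surrounding sync routine and its counters are hypothetical, and the prototype is declared locally only because the patch does not show which header exports it.

    /* Illustrative only: push counters accumulated by a hypothetical offload
     * engine into a bridge device's statistics. */
    #include <linux/netdevice.h>

    extern void br_dev_update_stats(struct net_device *dev,
                                    struct rtnl_link_stats64 *nlstats);

    static void myengine_sync_bridge_stats(struct net_device *br_dev,
                                           u64 rx_pkts, u64 rx_bytes,
                                           u64 tx_pkts, u64 tx_bytes)
    {
            struct rtnl_link_stats64 delta = {
                    .rx_packets = rx_pkts,
                    .rx_bytes   = rx_bytes,
                    .tx_packets = tx_pkts,
                    .tx_bytes   = tx_bytes,
            };

            /* Quietly does nothing unless br_dev is a bridge (IFF_EBRIDGE). */
            br_dev_update_stats(br_dev, &delta);
    }
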