Diffstat (limited to 'target/linux/generic/patches-3.3/721-phy_packets.patch')
-rw-r--r--  target/linux/generic/patches-3.3/721-phy_packets.patch  175
1 file changed, 175 insertions, 0 deletions
diff --git a/target/linux/generic/patches-3.3/721-phy_packets.patch b/target/linux/generic/patches-3.3/721-phy_packets.patch
new file mode 100644
index 000000000..575fbaeaa
--- /dev/null
+++ b/target/linux/generic/patches-3.3/721-phy_packets.patch
@@ -0,0 +1,175 @@
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1078,6 +1078,11 @@ struct net_device {
+ const struct net_device_ops *netdev_ops;
+ const struct ethtool_ops *ethtool_ops;
+
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ void (*eth_mangle_rx)(struct net_device *dev, struct sk_buff *skb);
++ struct sk_buff *(*eth_mangle_tx)(struct net_device *dev, struct sk_buff *skb);
++#endif
++
+ /* Hardware header description */
+ const struct header_ops *header_ops;
+
+@@ -1134,6 +1139,9 @@ struct net_device {
+ void *ax25_ptr; /* AX.25 specific data */
+ struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
+ assign before registering */
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ void *phy_ptr; /* PHY device specific data */
++#endif
+
+ /*
+ * Cache lines mostly used on receive path (including eth_type_trans())
+--- a/include/linux/if.h
++++ b/include/linux/if.h
+@@ -80,6 +80,7 @@
+ * skbs on transmit */
+ #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
+ #define IFF_TEAM_PORT 0x40000 /* device used as team port */
++#define IFF_NO_IP_ALIGN 0x80000 /* do not ip-align allocated rx pkts */
+
+ #define IF_GET_IFACE 0x0001 /* for querying only */
+ #define IF_GET_PROTO 0x0002
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1661,6 +1661,10 @@ extern struct sk_buff *dev_alloc_skb(uns
+ extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask);
+
++extern struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
++ unsigned int length, gfp_t gfp);
++
++
+ /**
+ * netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+@@ -1680,16 +1684,6 @@ static inline struct sk_buff *netdev_all
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+ }
+
+-static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+- unsigned int length, gfp_t gfp)
+-{
+- struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
+-
+- if (NET_IP_ALIGN && skb)
+- skb_reserve(skb, NET_IP_ALIGN);
+- return skb;
+-}
+-
+ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+ {
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -23,6 +23,12 @@ menuconfig NET
+
+ if NET
+
++config ETHERNET_PACKET_MANGLE
++ bool
++ help
++ This option can be selected by phy drivers that need to mangle
++ packets going in or out of an ethernet device.
++
+ config WANT_COMPAT_NETLINK_MESSAGES
+ bool
+ help
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2267,9 +2267,19 @@ int dev_hard_start_xmit(struct sk_buff *
+ }
+ }
+
+- skb_len = skb->len;
+- rc = ops->ndo_start_xmit(skb, dev);
+- trace_net_dev_xmit(skb, rc, dev, skb_len);
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (!dev->eth_mangle_tx ||
++ (skb = dev->eth_mangle_tx(dev, skb)) != NULL)
++#else
++ if (1)
++#endif
++ {
++ skb_len = skb->len;
++ rc = ops->ndo_start_xmit(skb, dev);
++ trace_net_dev_xmit(skb, rc, dev, skb_len);
++ } else {
++ rc = NETDEV_TX_OK;
++ }
+ if (rc == NETDEV_TX_OK)
+ txq_trans_update(txq);
+ return rc;
+@@ -2289,9 +2299,19 @@ gso:
+ if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+ skb_dst_drop(nskb);
+
+- skb_len = nskb->len;
+- rc = ops->ndo_start_xmit(nskb, dev);
+- trace_net_dev_xmit(nskb, rc, dev, skb_len);
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (!dev->eth_mangle_tx ||
++ (nskb = dev->eth_mangle_tx(dev, nskb)) != NULL)
++#else
++ if (1)
++#endif
++ {
++ skb_len = nskb->len;
++ rc = ops->ndo_start_xmit(nskb, dev);
++ trace_net_dev_xmit(nskb, rc, dev, skb_len);
++ } else {
++ rc = NETDEV_TX_OK;
++ }
+ if (unlikely(rc != NETDEV_TX_OK)) {
+ if (rc & ~NETDEV_TX_MASK)
+ goto out_kfree_gso_skb;
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -58,6 +58,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
++#include <linux/if.h>
+
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -320,6 +321,22 @@ struct sk_buff *__netdev_alloc_skb(struc
+ }
+ EXPORT_SYMBOL(__netdev_alloc_skb);
+
++struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
++ unsigned int length, gfp_t gfp)
++{
++ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
++
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (dev->priv_flags & IFF_NO_IP_ALIGN)
++ return skb;
++#endif
++
++ if (NET_IP_ALIGN && skb)
++ skb_reserve(skb, NET_IP_ALIGN);
++ return skb;
++}
++EXPORT_SYMBOL(__netdev_alloc_skb_ip_align);
++
+ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size)
+ {
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -160,6 +160,12 @@ __be16 eth_type_trans(struct sk_buff *sk
+ struct ethhdr *eth;
+
+ skb->dev = dev;
++
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (dev->eth_mangle_rx)
++ dev->eth_mangle_rx(dev, skb);
++#endif
++
+ skb_reset_mac_header(skb);
+ skb_pull_inline(skb, ETH_HLEN);
+ eth = eth_hdr(skb);
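
For context (not part of the patch above): the Kconfig help says ETHERNET_PACKET_MANGLE is meant to be selected by phy drivers that need to mangle packets going in or out of an ethernet device. A minimal sketch of how such a driver might wire up the hooks follows. The example_* names and EXAMPLE_TAG_LEN are hypothetical; only the net_device fields (eth_mangle_rx, eth_mangle_tx, phy_ptr) and the IFF_NO_IP_ALIGN flag come from the patch itself.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if.h>

#define EXAMPLE_TAG_LEN	4	/* hypothetical PHY/switch tag size */

/* rx hook: called from eth_type_trans() before the MAC header is parsed,
 * so a device-specific tag in front of the Ethernet header can be removed
 * before normal receive processing. */
static void example_mangle_rx(struct net_device *dev, struct sk_buff *skb)
{
	if (skb->len > EXAMPLE_TAG_LEN)
		skb_pull(skb, EXAMPLE_TAG_LEN);
}

/* tx hook: called from dev_hard_start_xmit() just before ndo_start_xmit().
 * Returning NULL means the skb was consumed here; the core then reports
 * NETDEV_TX_OK without handing anything to the driver. */
static struct sk_buff *example_mangle_tx(struct net_device *dev,
					 struct sk_buff *skb)
{
	u8 *tag;

	if (skb_cow_head(skb, EXAMPLE_TAG_LEN) < 0) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tag = skb_push(skb, EXAMPLE_TAG_LEN);
	memset(tag, 0, EXAMPLE_TAG_LEN);	/* fill in the real tag here */
	return skb;
}

static void example_attach_mangle(struct net_device *dev, void *phy_priv)
{
	dev->phy_ptr = phy_priv;
	dev->eth_mangle_rx = example_mangle_rx;
	dev->eth_mangle_tx = example_mangle_tx;
	/* If the hardware needs rx buffers without the 2-byte NET_IP_ALIGN
	 * offset, this flag makes __netdev_alloc_skb_ip_align() return the
	 * buffer unpadded and leaves alignment to the driver. */
	dev->priv_flags |= IFF_NO_IP_ALIGN;
}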