Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-07-17

This series contains updates to igb, ixgbe, ixgbevf, i40e, bnx2x,
freescale, siena and dp83640.

Jacob provides several patches to clarify the intended way to implement
both SIOCSHWTSTAMP and ethtool's get_ts_info(). It is okay to support
the specific filters in SIOCSHWTSTAMP by upscaling them to the generic
filters.

Alex Duyck provides an igb patch to pull the time stamp from the
fragment before it gets added to the skb, to avoid a possible issue in
which the fragment can be less than IGB_RX_HDR_LEN due to the time
stamp being pulled after the copybreak check. He also provides an
ixgbevf patch to fold the ixgbevf_pull_tail() call into
ixgbevf_add_rx_frag(), which has the advantage that the fragment does
not have to be modified after it is added to the skb.

Fan provides patches for ixgbe/ixgbevf to set the receive hash type
based on the receive descriptor RSS type.

Todd provides a fix for igb where link on any media other than copper
was not being detected, since the driver was looking at the incorrect
PHY page (the page in use gets switched before the function that checks
link is executed).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f3120acc78
Documentation/networking/timestamping.txt
@@ -359,6 +359,13 @@ the requested fine-grained filtering for incoming packets is not
 supported, the driver may time stamp more than just the requested types
 of packets.
 
+Drivers are free to use a more permissive configuration than the requested
+configuration. It is expected that drivers should only implement directly the
+most generic mode that can be supported. For example if the hardware can
+support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+is more generic (and more useful to applications).
+
 A driver which supports hardware time stamping shall update the struct
 with the actual, possibly more permissive configuration. If the
 requested packets cannot be time stamped, then nothing should be
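As a rough illustration of the upscaling this new documentation recommends, a hypothetical driver's SIOCSHWTSTAMP handler might map every specific PTP v2 filter onto the generic v2 event mode. This is a sketch only, not code from this series; the example_* name is invented, and it assumes hardware that can time stamp all PTP v2 event packets:

/* Illustrative sketch (not from this series): upscale specific PTP v2
 * filters to the generic HWTSTAMP_FILTER_PTP_V2_EVENT mode.
 */
#include <linux/net_tstamp.h>
#include <linux/errno.h>

static int example_set_rx_filter(struct hwtstamp_config *config)
{
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* rx time stamping disabled */
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		/* upscale to the most generic mode the hardware has and
		 * report the more permissive configuration back
		 */
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		/* requested packets cannot be time stamped */
		return -ERANGE;
	}
	return 0;
}

The caller then copies the possibly-upscaled config back to user space, so applications see the actual, more permissive configuration.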
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3562,17 +3562,8 @@ static int bnx2x_get_ts_info(struct net_device *dev,
 
 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 	info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
 
drivers/net/ethernet/freescale/fec_ptp.c
@@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
 		break;
 
 	default:
-		/*
-		 * register RXMTRL must be set in order to do V1 packets,
-		 * therefore it is not possible to time stamp both V1 Sync and
-		 * Delay_Req messages and hardware does not support
-		 * timestamping all packets => return error
-		 */
 		fep->hwts_rx_en = 1;
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1467,17 +1467,8 @@ static int i40e_get_ts_info(struct net_device *dev,
 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
 
 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 	return 0;
 }
drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* reset page to 0 */
-	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
-	if (ret_val)
-		return ret_val;
-
 	if (data & E1000_M88E1112_STATUS_LINK)
 		port = E1000_MEDIA_PORT_OTHER;
 
@@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
 	if (port && (hw->dev_spec._82575.media_port != port)) {
 		hw->dev_spec._82575.media_port = port;
 		hw->dev_spec._82575.media_changed = true;
 	}
 
-	ret_val = igb_check_for_link_82575(hw);
+	if (port == E1000_MEDIA_PORT_COPPER) {
+		/* reset page to 0 */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+		if (ret_val)
+			return ret_val;
+		igb_check_for_link_82575(hw);
+	} else {
+		igb_check_for_link_82575(hw);
+		/* reset page to 0 */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+		if (ret_val)
+			return ret_val;
+	}
 
 	return 0;
drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2396,10 +2396,6 @@ static int igb_get_ts_info(struct net_device *dev,
 		info->rx_filters |=
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		return 0;
drivers/net/ethernet/intel/igb/igb_main.c
@@ -6621,22 +6621,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 			    struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IGB_RX_BUFSZ;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
-		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-			va += IGB_TS_HDR_LEN;
-			size -= IGB_TS_HDR_LEN;
-		}
+	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+		va += IGB_TS_HDR_LEN;
+		size -= IGB_TS_HDR_LEN;
+	}
 
+	if (likely(size <= IGB_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6651,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
@@ -6790,62 +6806,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
 	return true;
 }
 
-/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
-			  union e1000_adv_rx_desc *rx_desc,
-			  struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		/* retrieve timestamp from buffer */
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
-		/* update pointers to remove timestamp header */
-		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
-		frag->page_offset += IGB_TS_HDR_LEN;
-		skb->data_len -= IGB_TS_HDR_LEN;
-		skb->len -= IGB_TS_HDR_LEN;
-
-		/* move va to start of packet data */
-		va += IGB_TS_HDR_LEN;
-	}
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
 /**
  * igb_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -6873,10 +6833,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		igb_pull_tail(rx_ring, rx_desc, skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1394,14 +1394,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 	/*
 	 * Continue setup of fdirctrl register bits:
 	 *  Turn perfect match filtering on
-	 *  Report hash in RSS field of Rx wb descriptor
 	 *  Initialize the drop queue
 	 *  Move the flexible bytes to use the ethertype - shift 6 words
 	 *  Set the maximum length per hash bucket to 0xA filters
 	 *  Send interrupt when 64 (0x4 * 16) filters are left
 	 */
 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
-		    IXGBE_FDIRCTRL_REPORT_STATUS |
 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2938,14 +2938,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 			(1 << HWTSTAMP_FILTER_NONE) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
 	default:
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1360,14 +1360,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 }
 
 #endif /* CONFIG_IXGBE_DCA */
 
+#define IXGBE_RSS_L4_TYPES_MASK \
+	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 				 union ixgbe_adv_rx_desc *rx_desc,
 				 struct sk_buff *skb)
 {
-	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb_set_hash(skb,
-			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-			     PKT_HASH_TYPE_L3);
+	u16 rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		   IXGBE_RXDADV_RSSTYPE_MASK;
+
+	if (!rss_type)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
 
 #ifdef IXGBE_FCOE
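The IXGBE_RSS_L4_TYPES_MASK test above packs the L4-hashed RSS types into a single word so the L4-vs-L3 decision is one shift and one AND. A minimal stand-alone sketch of the same idiom, with values mirroring the IXGBE_RXDADV_RSSTYPE_* defines added in the next hunk (illustrative user-space C, not driver code):

/* Sketch of the bit-mask idiom used by ixgbe_rx_hash() above. */
#include <stdio.h>

#define RSSTYPE_IPV4_TCP 0x1	/* hash covers L4 ports */
#define RSSTYPE_IPV4     0x2	/* L3-only hash */
#define RSSTYPE_IPV4_UDP 0x7	/* hash covers L4 ports */

#define L4_TYPES_MASK ((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP))

int main(void)
{
	unsigned int rss_type = RSSTYPE_IPV4; /* as parsed from a descriptor */

	/* one shift and one AND decide whether the hash included L4 */
	printf("%s hash\n", (L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3");
	return 0;
}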
drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDADV_SPLITHEADER_EN	0x00001000
 #define IXGBE_RXDADV_SPH		0x8000
 
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE	0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP	0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4	0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP	0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX	0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6	0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP	0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP	0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
 	IXGBE_RXD_ERR_CE | \
 	IXGBE_RXD_ERR_LE | \
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
+#define IXGBE_RSS_L4_TYPES_MASK \
+	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+				   union ixgbe_adv_rx_desc *rx_desc,
+				   struct sk_buff *skb)
+{
+	u16 rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		   IXGBE_RXDADV_RSSTYPE_MASK;
+
+	if (!rss_type)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containing ring specific data
@@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
 				       union ixgbe_adv_rx_desc *rx_desc,
 				       struct sk_buff *skb)
 {
+	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
 	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
 
 	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -648,46 +675,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 	}
 }
 
-/**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
-			      struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
 /**
  * ixgbevf_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		ixgbevf_pull_tail(rx_ring, skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
@@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 			    struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
+
+	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	/* avoid re-using remote pages */
 	if (unlikely(ixgbevf_page_is_reserved(page)))
drivers/net/ethernet/sfc/siena.c
@@ -1042,9 +1042,5 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
 			     1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
-			     1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
-			     1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
 };
drivers/net/phy/dp83640.c
@@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
 	info->rx_filters =
 		(1 << HWTSTAMP_FILTER_NONE) |
 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 	return 0;
 }
 
include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
 * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
 * respectively. For example, if the device supports HWTSTAMP_TX_ON,
 * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If the SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is supported by HWTSTAMP_FILTER_V1_EVENT, then the
+ * driver should only report HWTSTAMP_FILTER_V1_EVENT in this op.
 */
 struct ethtool_ts_info {
 	__u32 cmd;
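Taken together with the upscaling rule for SIOCSHWTSTAMP, a get_ts_info() implementation for such hardware would then advertise only the generic modes it actually programs. A hypothetical sketch (not from this series; the example_* name is invented, and it assumes a device whose hardware mode is "all PTP v2 events"):

/* Illustrative sketch: report only the generic filters the hardware
 * implements, without listing the specific filters that SIOCSHWTSTAMP
 * would upscale.
 */
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

static int example_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	/* generic modes only; no upscaled specifics */
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}

This mirrors the driver changes above: the ts_info lists shrink to the generic filters, while the SIOCSHWTSTAMP paths keep accepting the specific requests by upscaling them.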