Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
commit 1397e171f1
@@ -7966,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                 /* AER (Advanced Error Reporting) hooks */
                 err = pci_enable_pcie_error_reporting(pdev);
-                if (err) {
-                        dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-                                "failed 0x%x\n", err);
-                        /* non-fatal, continue */
-                }
+                if (!err)
+                        bp->flags |= BNX2_FLAG_AER_ENABLED;
 
         } else {
                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8233,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
         return 0;
 
 err_out_unmap:
-        if (bp->flags & BNX2_FLAG_PCIE)
+        if (bp->flags & BNX2_FLAG_AER_ENABLED) {
                 pci_disable_pcie_error_reporting(pdev);
+                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+        }
 
         if (bp->regview) {
                 iounmap(bp->regview);
@@ -8422,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 
         kfree(bp->temp_stats_blk);
 
-        if (bp->flags & BNX2_FLAG_PCIE)
+        if (bp->flags & BNX2_FLAG_AER_ENABLED) {
                 pci_disable_pcie_error_reporting(pdev);
+                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+        }
 
         free_netdev(dev);
@@ -8539,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
         }
         rtnl_unlock();
 
-        if (!(bp->flags & BNX2_FLAG_PCIE))
+        if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
                 return result;
 
         err = pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -6741,6 +6741,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN          0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN         0x00001000
 #define BNX2_FLAG_BROKEN_STATS          0x00002000
+#define BNX2_FLAG_AER_ENABLED           0x00004000
 
         struct bnx2_napi        bnx2_napi[BNX2_MAX_MSIX_VEC];
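Taken together, the bnx2 hunks above implement a simple PCIe AER lifecycle: enable error reporting at probe time and remember whether it actually succeeded in the new BNX2_FLAG_AER_ENABLED flag, tear it down on the error and remove paths only when it was enabled, and let the PCI error handler clean up AER status only when the flag is set, instead of keying everything off BNX2_FLAG_PCIE. The following is a minimal sketch of that pattern for a hypothetical driver; foo_priv and FOO_FLAG_AER_ENABLED are placeholders, and only the pci_*_pcie_error_reporting()/pci_cleanup_aer_uncorrect_error_status() calls are the kernel APIs used in the hunks.

#include <linux/pci.h>
#include <linux/aer.h>

#define FOO_FLAG_AER_ENABLED    0x1     /* hypothetical flag, not bnx2 code */

struct foo_priv {                       /* hypothetical driver private data */
        struct pci_dev *pdev;
        u32 flags;
};

/* Probe path: remember whether AER was actually enabled. */
static void foo_enable_aer(struct foo_priv *fp)
{
        if (!pci_enable_pcie_error_reporting(fp->pdev))
                fp->flags |= FOO_FLAG_AER_ENABLED;
        /* failure is non-fatal: the device still works without AER */
}

/* Remove/error path: only undo what was actually set up. */
static void foo_disable_aer(struct foo_priv *fp)
{
        if (fp->flags & FOO_FLAG_AER_ENABLED) {
                pci_disable_pcie_error_reporting(fp->pdev);
                fp->flags &= ~FOO_FLAG_AER_ENABLED;
        }
}

/* Slot-reset handler: AER status is cleaned up only when AER is on. */
static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev,
                                          struct foo_priv *fp,
                                          pci_ers_result_t result)
{
        if (!(fp->flags & FOO_FLAG_AER_ENABLED))
                return result;

        if (pci_cleanup_aer_uncorrect_error_status(pdev))
                return PCI_ERS_RESULT_DISCONNECT;
        return result;
}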
@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 {
         int i;
-        u32 *page_table = dma->pgtbl;
+        __le32 *page_table = (__le32 *) dma->pgtbl;
 
         for (i = 0; i < dma->num_pages; i++) {
                 /* Each entry needs to be in big endian format. */
-                *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+                *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
                 page_table++;
-                *page_table = (u32) dma->pg_map_arr[i];
+                *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
                 page_table++;
         }
 }
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 {
         int i;
-        u32 *page_table = dma->pgtbl;
+        __le32 *page_table = (__le32 *) dma->pgtbl;
 
         for (i = 0; i < dma->num_pages; i++) {
                 /* Each entry needs to be in little endian format. */
-                *page_table = dma->pg_map_arr[i] & 0xffffffff;
+                *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
                 page_table++;
-                *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+                *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
                 page_table++;
         }
 }
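Both cnic hunks fix the same endianness bug: the DMA page-table entries were stored as raw u32 values, which is only correct on little-endian hosts. Typing the table as __le32 and going through cpu_to_le32() keeps behaviour identical on little-endian machines (where the conversion is a no-op), produces correctly byte-swapped entries on big-endian hosts, and lets sparse flag any future raw stores. A small illustrative sketch of the idiom, with placeholder names rather than cnic code:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Fill a device-visible page table with 64-bit DMA addresses split into
 * two 32-bit words (high word first), each stored little-endian.
 * "tbl" and "pages" are hypothetical; the pattern mirrors the hunks above.
 */
static void fill_page_table_le(__le32 *tbl, const dma_addr_t *pages, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                *tbl++ = cpu_to_le32((u64) pages[i] >> 32);     /* high word */
                *tbl++ = cpu_to_le32(pages[i] & 0xffffffff);    /* low word */
        }
}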
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
 
         /* Free all the skbuffs in the queue. */
         for (i = 0; i < RX_RING_SIZE; i++) {
-                np->rx_ring[i].status = 0;
-                np->rx_ring[i].fraginfo = 0;
                 skb = np->rx_skbuff[i];
                 if (skb) {
                         pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
                         dev_kfree_skb (skb);
                         np->rx_skbuff[i] = NULL;
                 }
+                np->rx_ring[i].status = 0;
+                np->rx_ring[i].fraginfo = 0;
         }
         for (i = 0; i < TX_RING_SIZE; i++) {
                 skb = np->tx_skbuff[i];
@@ -120,6 +120,9 @@ struct netfront_info {
         unsigned long rx_pfn_array[NET_RX_RING_SIZE];
         struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
         struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+        /* Statistics */
+        int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
         return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
         struct iphdr *iph;
         unsigned char *th;
         int err = -EPROTO;
+        int recalculate_partial_csum = 0;
+
+        /*
+         * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+         * peers can fail to set NETRXF_csum_blank when sending a GSO
+         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+         * recalculate the partial checksum.
+         */
+        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+                struct netfront_info *np = netdev_priv(dev);
+                np->rx_gso_checksum_fixup++;
+                skb->ip_summed = CHECKSUM_PARTIAL;
+                recalculate_partial_csum = 1;
+        }
+
+        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+        if (skb->ip_summed != CHECKSUM_PARTIAL)
+                return 0;
 
         if (skb->protocol != htons(ETH_P_IP))
                 goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
         switch (iph->protocol) {
         case IPPROTO_TCP:
                 skb->csum_offset = offsetof(struct tcphdr, check);
+
+                if (recalculate_partial_csum) {
+                        struct tcphdr *tcph = (struct tcphdr *)th;
+                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                         skb->len - iph->ihl*4,
+                                                         IPPROTO_TCP, 0);
+                }
                 break;
         case IPPROTO_UDP:
                 skb->csum_offset = offsetof(struct udphdr, check);
+
+                if (recalculate_partial_csum) {
+                        struct udphdr *udph = (struct udphdr *)th;
+                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                         skb->len - iph->ihl*4,
+                                                         IPPROTO_UDP, 0);
+                }
                 break;
         default:
                 if (net_ratelimit())
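The two xen-netfront hunks above extend the checksum fixup: a GSO skb that arrives without NETRXF_csum_blank is forced to CHECKSUM_PARTIAL, counted in rx_gso_checksum_fixup, and its TCP/UDP checksum field is reseeded with the complemented pseudo-header checksum so that later checksum completion (hardware offload or skb_checksum_help()) covers the payload correctly. A compact sketch of that reseeding step for the TCP case; the helper name and the assumption that iph/tcph already point into the linear header are mine, not the driver's:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/*
 * Illustrative helper: seed a forced-CHECKSUM_PARTIAL skb for TCP by
 * storing the complemented pseudo-header checksum in tcph->check and
 * pointing csum_start/csum_offset at it.
 */
static void reseed_tcp_partial_csum(struct sk_buff *skb, struct iphdr *iph,
                                    struct tcphdr *tcph)
{
        skb->csum_start = (unsigned char *)tcph - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                         skb->len - iph->ihl * 4,
                                         IPPROTO_TCP, 0);
}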
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
                 /* Ethernet work: Delayed to here as it peeks the header. */
                 skb->protocol = eth_type_trans(skb, dev);
 
-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        if (skb_checksum_setup(skb)) {
-                                kfree_skb(skb);
-                                packets_dropped++;
-                                dev->stats.rx_errors++;
-                                continue;
-                        }
+                if (checksum_setup(dev, skb)) {
+                        kfree_skb(skb);
+                        packets_dropped++;
+                        dev->stats.rx_errors++;
+                        continue;
                 }
 
                 dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
         }
 }
 
+static const struct xennet_stat {
+        char name[ETH_GSTRING_LEN];
+        u16 offset;
+} xennet_stats[] = {
+        {
+                "rx_gso_checksum_fixup",
+                offsetof(struct netfront_info, rx_gso_checksum_fixup)
+        },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+        switch (string_set) {
+        case ETH_SS_STATS:
+                return ARRAY_SIZE(xennet_stats);
+        default:
+                return -EINVAL;
+        }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+                                     struct ethtool_stats *stats, u64 * data)
+{
+        void *np = netdev_priv(dev);
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+                data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+        int i;
+
+        switch (stringset) {
+        case ETH_SS_STATS:
+                for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+                        memcpy(data + i * ETH_GSTRING_LEN,
+                               xennet_stats[i].name, ETH_GSTRING_LEN);
+                break;
+        }
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
         .set_tx_csum = ethtool_op_set_tx_csum,
         .set_sg = xennet_set_sg,
         .set_tso = xennet_set_tso,
         .get_link = ethtool_op_get_link,
+
+        .get_sset_count = xennet_get_sset_count,
+        .get_ethtool_stats = xennet_get_ethtool_stats,
+        .get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
@@ -1122,8 +1122,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
                                 return -EOPNOTSUPP;
 
                         if (af_ops->validate_link_af) {
-                                err = af_ops->validate_link_af(dev,
-                                        tb[IFLA_AF_SPEC]);
+                                err = af_ops->validate_link_af(dev, af);
                                 if (err < 0)
                                         return err;
                         }
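The rtnetlink hunk changes the contract of validate_link_af(): each per-family callback now receives its own nested attribute (af) rather than the whole IFLA_AF_SPEC container, matching what the later set_link_af() call operates on. For context, a sketch of the surrounding loop as it looks in validate_linkmsg() of this era; treat the details (helper visibility, exact error codes) as a reconstruction rather than a verbatim quote:

#include <linux/errno.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>

/*
 * Illustrative reconstruction of the caller side: IFLA_AF_SPEC is a
 * container with one nested attribute per address family, and each
 * family's validate_link_af() sees only its own attribute.
 * rtnl_af_lookup() is internal to net/core/rtnetlink.c and is shown
 * here purely for illustration.
 */
static int validate_af_spec(struct net_device *dev, struct nlattr *af_spec)
{
        struct nlattr *af;
        int rem, err;

        nla_for_each_nested(af, af_spec, rem) {
                const struct rtnl_af_ops *af_ops = rtnl_af_lookup(nla_type(af));

                if (!af_ops)
                        return -EAFNOSUPPORT;
                if (!af_ops->set_link_af)
                        return -EOPNOTSUPP;
                if (af_ops->validate_link_af) {
                        /* per-family attribute, not the whole IFLA_AF_SPEC */
                        err = af_ops->validate_link_af(dev, af);
                        if (err < 0)
                                return err;
                }
        }
        return 0;
}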
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         shinfo = skb_shinfo(skb);
         memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
         atomic_set(&shinfo->dataref, 1);
+        kmemcheck_annotate_variable(shinfo->destructor_arg);
 
         if (fclone) {
                 struct sk_buff *child = skb + 1;
@@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
 static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
 {
-        struct sock *sk = sock->sk;
         struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
         struct net_device *dev;
         struct ec_addr addr;
         int err;
         unsigned char port, cb;
 #if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+        struct sock *sk = sock->sk;
         struct sk_buff *skb;
         struct ec_cb *eb;
 #endif
@@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 error_free_buf:
         vfree(userbuf);
+error:
 #else
         err = -EPROTOTYPE;
 #endif
-error:
         mutex_unlock(&econet_mutex);
 
         return err;
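Both econet hunks are compile-warning fixes of the same kind: the struct sock *sk variable and the error: label are only used by code inside the #if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) block, so they move inside it, and configurations that build the #else branch no longer see an unused variable or an unused label. A tiny standalone illustration of the pattern (hypothetical names, not econet code; build with and without -DHAVE_FAST_PATH):

#include <stdio.h>

/*
 * The declaration and the label live under the same #if as their only
 * uses, so neither configuration produces unused-variable or
 * unused-label warnings.
 */
static int do_send(int payload)
{
        int err = 0;
#ifdef HAVE_FAST_PATH
        int credits = 4;                /* only the fast path uses this */

        if (payload > credits) {
                err = -1;
                goto error;             /* only the fast path jumps here */
        }
        printf("sent %d (fast path)\n", payload);
error:
#else
        err = -2;                       /* fast path not compiled in */
#endif
        return err;
}

int main(void)
{
        return do_send(3) < 0;
}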
@@ -200,7 +200,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
         }
         dst_destroy_metrics_generic(dst);
         if (peer) {
-                BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
                 rt->rt6i_peer = NULL;
                 inet_putpeer(peer);
         }
@@ -210,9 +209,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
 {
         struct inet_peer *peer;
 
-        if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
-                return;
-
         peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
         if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
                 inet_putpeer(peer);
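The rt6_bind_peer() body kept by the hunk above shows a lock-free binding idiom: take a reference from inet_getpeer_v6() and publish it with cmpxchg(), dropping the extra reference if another CPU won the race. A generic sketch of that idiom, with placeholder types and hypothetical helpers rather than the ipv6 code:

#include <linux/atomic.h>       /* cmpxchg(); location varies by kernel version */

struct peer;                                    /* opaque, refcounted object */
extern struct peer *peer_lookup_get(int key);   /* hypothetical: returns a reference */
extern void peer_put(struct peer *p);           /* hypothetical: drops a reference */

struct entry {
        struct peer *peer;                      /* NULL until first bound */
};

/*
 * Bind e->peer exactly once without a lock: the loser of the race
 * simply drops the extra reference it took.
 */
static void entry_bind_peer(struct entry *e, int key)
{
        struct peer *p = peer_lookup_get(key);

        if (p && cmpxchg(&e->peer, NULL, p) != NULL)
                peer_put(p);
}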