author     Ingo Molnar <mingo@elte.hu>    2009-02-04 14:54:56 +0100
committer  Ingo Molnar <mingo@elte.hu>    2009-02-04 14:54:56 +0100
commit     bb960a1e42042e82447a5bc0941b3ab6d614bac3
tree       d2295a923fabb1b01b25bb015c4c2e42ee9df5ca /net
parent     858770619debfb9269add63e4ba8b7c6b5538dd1
parent     06fc732c33a7ff5e4c91bcf4a6ca86b5e335ad9a
Merge branch 'core/xen' into x86/urgent
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 4
-rw-r--r--  net/9p/Kconfig | 2
-rw-r--r--  net/9p/client.c | 2
-rw-r--r--  net/Kconfig | 8
-rw-r--r--  net/bridge/br_netfilter.c | 18
-rw-r--r--  net/bridge/netfilter/ebtables.c | 5
-rw-r--r--  net/can/bcm.c | 57
-rw-r--r--  net/core/dev.c | 76
-rw-r--r--  net/core/net_namespace.c | 2
-rw-r--r--  net/core/skbuff.c | 91
-rw-r--r--  net/dccp/ccids/Kconfig | 2
-rw-r--r--  net/dccp/ccids/lib/tfrc.c | 2
-rw-r--r--  net/ipv4/ipconfig.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 7
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 2
-rw-r--r--  net/ipv4/tcp.c | 25
-rw-r--r--  net/ipv4/udp.c | 55
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 7
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ip6_fib.c | 15
-rw-r--r--  net/ipv6/ip6_input.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 24
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 2
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/key/af_key.c | 1
-rw-r--r--  net/mac80211/ht.c | 2
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/mac80211/mesh_plink.c | 1
-rw-r--r--  net/mac80211/mlme.c | 4
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 10
-rw-r--r--  net/mac80211/sta_info.h | 1
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 5
-rw-r--r--  net/netfilter/x_tables.c | 8
-rw-r--r--  net/netfilter/xt_time.c | 11
-rw-r--r--  net/packet/af_packet.c | 9
-rw-r--r--  net/sched/sch_htb.c | 15
-rw-r--r--  net/sctp/input.c | 13
-rw-r--r--  net/sctp/output.c | 7
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/socket.c | 63
-rw-r--r--  net/sunrpc/Kconfig | 78
-rw-r--r--  net/tipc/bcast.h | 2
-rw-r--r--  net/wimax/debugfs.c | 11
-rw-r--r--  net/wimax/stack.c | 13
-rw-r--r--  net/wireless/reg.c | 142
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 1
53 files changed, 599 insertions, 259 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 6c132394026..e9db889d622 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -62,13 +62,13 @@ struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
return vlan_dev_info(dev)->real_dev;
}
-EXPORT_SYMBOL_GPL(vlan_dev_real_dev);
+EXPORT_SYMBOL(vlan_dev_real_dev);
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
return vlan_dev_info(dev)->vlan_id;
}
-EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+EXPORT_SYMBOL(vlan_dev_vlan_id);
static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 0663f99e977..7ed75c7bd5d 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -23,7 +23,7 @@ config NET_9P_VIRTIO
guest partitions and a host partition.
config NET_9P_RDMA
- depends on INET && INFINIBAND && EXPERIMENTAL
+ depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
tristate "9P RDMA Transport (Experimental)"
help
This builds support for an RDMA transport.
diff --git a/net/9p/client.c b/net/9p/client.c
index 821f1ec0b2c..1eb580c38fb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -618,7 +618,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
return ERR_PTR(-ENOMEM);
ret = p9_idpool_get(clnt->fidpool);
- if (fid->fid < 0) {
+ if (ret < 0) {
ret = -ENOSPC;
goto error;
}
diff --git a/net/Kconfig b/net/Kconfig
index bf2776018f7..cdb8fdef6c4 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -24,14 +24,6 @@ if NET
menu "Networking options"
-config NET_NS
- bool "Network namespace support"
- default n
- depends on EXPERIMENTAL && NAMESPACES
- help
- Allow user space to create what appear to be multiple instances
- of the network stack.
-
config COMPAT_NET_DEV_OPS
def_bool y
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a65e43a17fb..cf754ace0b7 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -58,11 +58,11 @@ static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 1;
-static int brnf_filter_pppoe_tagged __read_mostly = 1;
+static int brnf_filter_vlan_tagged __read_mostly = 0;
+static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
-#define brnf_filter_vlan_tagged 1
-#define brnf_filter_pppoe_tagged 1
+#define brnf_filter_vlan_tagged 0
+#define brnf_filter_pppoe_tagged 0
#endif
static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -686,8 +686,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
- else
+ else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+ IS_PPPOE_IPV6(skb))
pf = PF_INET6;
+ else
+ return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
@@ -828,8 +831,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
- else
+ else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+ IS_PPPOE_IPV6(skb))
pf = PF_INET6;
+ else
+ return NF_ACCEPT;
#ifdef CONFIG_NETFILTER_DEBUG
if (skb->dst == NULL) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index fa108c46e85..820252aee81 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -79,18 +79,19 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
{
par->match = m->u.match;
par->matchinfo = m->data;
- return m->u.match->match(skb, par);
+ return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
}
static inline int ebt_dev_check(char *entry, const struct net_device *device)
{
int i = 0;
- const char *devname = device->name;
+ const char *devname;
if (*entry == '\0')
return 0;
if (!device)
return 1;
+ devname = device->name;
/* 1 is the wildcard token */
while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
i++;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 1649c8ab2c2..b7c7d465113 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -347,51 +347,54 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
struct bcm_op *op = (struct bcm_op *)data;
struct bcm_msg_head msg_head;
- /* create notification to user */
- msg_head.opcode = TX_EXPIRED;
- msg_head.flags = op->flags;
- msg_head.count = op->count;
- msg_head.ival1 = op->ival1;
- msg_head.ival2 = op->ival2;
- msg_head.can_id = op->can_id;
- msg_head.nframes = 0;
-
- bcm_send_to_user(op, &msg_head, NULL, 0);
-}
-
-/*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
- */
-static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
-{
- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
- enum hrtimer_restart ret = HRTIMER_NORESTART;
-
if (op->kt_ival1.tv64 && (op->count > 0)) {
op->count--;
- if (!op->count && (op->flags & TX_COUNTEVT))
- tasklet_schedule(&op->tsklet);
+ if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+ /* create notification to user */
+ msg_head.opcode = TX_EXPIRED;
+ msg_head.flags = op->flags;
+ msg_head.count = op->count;
+ msg_head.ival1 = op->ival1;
+ msg_head.ival2 = op->ival2;
+ msg_head.can_id = op->can_id;
+ msg_head.nframes = 0;
+
+ bcm_send_to_user(op, &msg_head, NULL, 0);
+ }
}
if (op->kt_ival1.tv64 && (op->count > 0)) {
/* send (next) frame */
bcm_can_tx(op);
- hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
- ret = HRTIMER_RESTART;
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival1),
+ HRTIMER_MODE_ABS);
} else {
if (op->kt_ival2.tv64) {
/* send (next) frame */
bcm_can_tx(op);
- hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
- ret = HRTIMER_RESTART;
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival2),
+ HRTIMER_MODE_ABS);
}
}
+}
- return ret;
+/*
+ * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
+ */
+static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+{
+ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+ tasklet_schedule(&op->tsklet);
+
+ return HRTIMER_NORESTART;
}
/*
diff --git a/net/core/dev.c b/net/core/dev.c
index 5f736f1ceea..5379b0c1190 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1088,6 +1088,11 @@ int dev_open(struct net_device *dev)
dev->flags |= IFF_UP;
/*
+ * Enable NET_DMA
+ */
+ dmaengine_get();
+
+ /*
* Initialize multicasting status
*/
dev_set_rx_mode(dev);
@@ -1164,6 +1169,11 @@ int dev_close(struct net_device *dev)
*/
call_netdevice_notifiers(NETDEV_DOWN, dev);
+ /*
+ * Shutdown NET_DMA
+ */
+ dmaengine_put();
+
return 0;
}
@@ -1524,7 +1534,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
skb->mac_len = skb->network_header - skb->mac_header;
__skb_pull(skb, skb->mac_len);
- if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ struct net_device *dev = skb->dev;
+ struct ethtool_drvinfo info = {};
+
+ if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+ dev->ethtool_ops->get_drvinfo(dev, &info);
+
+ WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
+ "ip_summed=%d",
+ info.driver, dev ? dev->features : 0L,
+ skb->sk ? skb->sk->sk_route_caps : 0L,
+ skb->len, skb->data_len, skb->ip_summed);
+
if (skb_header_cloned(skb) &&
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
return ERR_PTR(err);
@@ -2382,6 +2404,9 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
if (!(skb->dev->features & NETIF_F_GRO))
goto normal;
+ if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+ goto normal;
+
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
struct sk_buff *p;
@@ -2478,12 +2503,6 @@ EXPORT_SYMBOL(napi_gro_receive);
void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
- skb_shinfo(skb)->nr_frags = 0;
-
- skb->len -= skb->data_len;
- skb->truesize -= skb->data_len;
- skb->data_len = 0;
-
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
@@ -2517,6 +2536,7 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
if (!pskb_may_pull(skb, ETH_HLEN)) {
napi_reuse_skb(napi, skb);
+ skb = NULL;
goto out;
}
@@ -4424,6 +4444,45 @@ err_uninit:
}
/**
+ * init_dummy_netdev - init a dummy network device for NAPI
+ * @dev: device to init
+ *
+ * This takes a network device structure and initialize the minimum
+ * amount of fields so it can be used to schedule NAPI polls without
+ * registering a full blown interface. This is to be used by drivers
+ * that need to tie several hardware interfaces to a single NAPI
+ * poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+ /* Clear everything. Note we don't initialize spinlocks
+ * are they aren't supposed to be taken by any of the
+ * NAPI code and this dummy netdev is supposed to be
+ * only ever used for NAPI polls
+ */
+ memset(dev, 0, sizeof(struct net_device));
+
+ /* make sure we BUG if trying to hit standard
+ * register/unregister code path
+ */
+ dev->reg_state = NETREG_DUMMY;
+
+ /* initialize the ref count */
+ atomic_set(&dev->refcnt, 1);
+
+ /* NAPI wants this */
+ INIT_LIST_HEAD(&dev->napi_list);
+
+ /* a dummy interface is started by default */
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+ set_bit(__LINK_STATE_START, &dev->state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
+/**
* register_netdev - register a network device
* @dev: device to register
*
@@ -5151,9 +5210,6 @@ static int __init net_dev_init(void)
hotcpu_notifier(dev_cpu_callback, 0);
dst_init();
dev_mcast_init();
- #ifdef CONFIG_NET_DMA
- dmaengine_get();
- #endif
rc = 0;
out:
return rc;
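
The init_dummy_netdev() helper added to net/core/dev.c above exists so that drivers which multiplex several hardware ports behind one NAPI poll routine can get a NAPI-capable struct net_device without registering a real interface. A minimal usage sketch, assuming a hypothetical mydrv driver and a poll weight of 64 (none of these names or values come from the patch):

    #include <linux/netdevice.h>

    /* Hypothetical driver code, for illustration only. */
    struct mydrv_adapter {
            struct net_device dummy_dev;    /* never registered; NAPI host only */
            struct napi_struct napi;
    };

    static int mydrv_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;
            /* ... service RX for all hardware ports, up to budget ... */
            if (work_done < budget)
                    napi_complete(napi);
            return work_done;
    }

    static void mydrv_setup_napi(struct mydrv_adapter *ad)
    {
            init_dummy_netdev(&ad->dummy_dev);
            netif_napi_add(&ad->dummy_dev, &ad->napi, mydrv_poll, 64);
            napi_enable(&ad->napi);
    }
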
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55cffad2f32..55151faaf90 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -341,8 +341,8 @@ again:
rv = register_pernet_operations(first_device, ops);
if (rv < 0)
ida_remove(&net_generic_ids, *id);
- mutex_unlock(&net_mutex);
out:
+ mutex_unlock(&net_mutex);
return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5110b359c75..da74b844f4e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,17 +73,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- struct sk_buff *skb = (struct sk_buff *) buf->private;
-
- kfree_skb(skb);
+ put_page(buf->page);
}
static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- struct sk_buff *skb = (struct sk_buff *) buf->private;
-
- skb_get(skb);
+ get_page(buf->page);
}
static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -1334,9 +1330,19 @@ fault:
*/
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
- struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+ put_page(spd->pages[i]);
+}
+
+static inline struct page *linear_to_page(struct page *page, unsigned int len,
+ unsigned int offset)
+{
+ struct page *p = alloc_pages(GFP_KERNEL, 0);
- kfree_skb(skb);
+ if (!p)
+ return NULL;
+ memcpy(page_address(p) + offset, page_address(page) + offset, len);
+
+ return p;
}
/*
@@ -1344,16 +1350,23 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
*/
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
unsigned int len, unsigned int offset,
- struct sk_buff *skb)
+ struct sk_buff *skb, int linear)
{
if (unlikely(spd->nr_pages == PIPE_BUFFERS))
return 1;
+ if (linear) {
+ page = linear_to_page(page, len, offset);
+ if (!page)
+ return 1;
+ } else
+ get_page(page);
+
spd->pages[spd->nr_pages] = page;
spd->partial[spd->nr_pages].len = len;
spd->partial[spd->nr_pages].offset = offset;
- spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
spd->nr_pages++;
+
return 0;
}
@@ -1369,7 +1382,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
static inline int __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
unsigned int *len, struct sk_buff *skb,
- struct splice_pipe_desc *spd)
+ struct splice_pipe_desc *spd, int linear)
{
if (!*len)
return 1;
@@ -1392,7 +1405,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
/* the linear region may spread across several pages */
flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
- if (spd_fill_page(spd, page, flen, poff, skb))
+ if (spd_fill_page(spd, page, flen, poff, skb, linear))
return 1;
__segment_seek(&page, &poff, &plen, flen);
@@ -1419,7 +1432,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
- offset, len, skb, spd))
+ offset, len, skb, spd, 1))
return 1;
/*
@@ -1429,7 +1442,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(f->page, f->page_offset, f->size,
- offset, len, skb, spd))
+ offset, len, skb, spd, 0))
return 1;
}
@@ -1442,7 +1455,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
* the frag list, if such a thing exists. We'd probably need to recurse to
* handle that cleanly.
*/
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
unsigned int flags)
{
@@ -1455,16 +1468,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
.ops = &sock_pipe_buf_ops,
.spd_release = sock_spd_release,
};
- struct sk_buff *skb;
-
- /*
- * I'd love to avoid the clone here, but tcp_read_sock()
- * ignores reference counts and unconditonally kills the sk_buff
- * on return from the actor.
- */
- skb = skb_clone(__skb, GFP_KERNEL);
- if (unlikely(!skb))
- return -ENOMEM;
/*
* __skb_splice_bits() only fails if the output has no room left,
@@ -1488,15 +1491,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
}
done:
- /*
- * drop our reference to the clone, the pipe consumption will
- * drop the rest.
- */
- kfree_skb(skb);
-
if (spd.nr_pages) {
+ struct sock *sk = skb->sk;
int ret;
- struct sock *sk = __skb->sk;
/*
* Drop the socket lock, otherwise we have reverse
@@ -2215,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
return 0;
next_skb:
- block_limit = skb_headlen(st->cur_skb);
+ block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit) {
- *data = st->cur_skb->data + abs_offset;
+ *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
@@ -2253,13 +2250,14 @@ next_skb:
st->frag_data = NULL;
}
- if (st->cur_skb->next) {
- st->cur_skb = st->cur_skb->next;
+ if (st->root_skb == st->cur_skb &&
+ skb_shinfo(st->root_skb)->frag_list) {
+ st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
- } else if (st->root_skb == st->cur_skb &&
- skb_shinfo(st->root_skb)->frag_list) {
- st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+ } else if (st->cur_skb->next) {
+ st->cur_skb = st->cur_skb->next;
+ st->frag_idx = 0;
goto next_skb;
}
@@ -2588,8 +2586,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct sk_buff *nskb;
unsigned int headroom;
unsigned int hlen = p->data - skb_mac_header(p);
+ unsigned int len = skb->len;
- if (hlen + p->len + skb->len >= 65536)
+ if (hlen + p->len + len >= 65536)
return -E2BIG;
if (skb_shinfo(p)->frag_list)
@@ -2602,6 +2601,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+ skb_shinfo(skb)->nr_frags = 0;
+
+ skb->truesize -= skb->data_len;
+ skb->len -= skb->data_len;
+ skb->data_len = 0;
+
NAPI_GRO_CB(skb)->free = 1;
goto done;
}
@@ -2645,9 +2650,9 @@ merge:
done:
NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
- p->truesize += skb->len;
- p->len += skb->len;
+ p->data_len += len;
+ p->truesize += len;
+ p->len += len;
NAPI_GRO_CB(skb)->same_flow = 1;
return 0;
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index b28bf962edc..4b5db44970a 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -29,7 +29,7 @@ config IP_DCCP_CCID3
http://www.ietf.org/rfc/rfc4342.txt
The TFRC congestion control algorithms were initially described in
- RFC 5448.
+ RFC 5348.
This text was extracted from RFC 4340 (sec. 10.2),
http://www.ietf.org/rfc/rfc4340.txt
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 60c412ccfee..4902029854d 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -36,7 +36,7 @@ out:
return rc;
}
-void __exit tfrc_lib_exit(void)
+void tfrc_lib_exit(void)
{
tfrc_rx_packet_history_exit();
tfrc_tx_packet_history_exit();
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 42a0f3dd3fd..d722013c1ca 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
static int __init ip_auto_config(void)
{
__be32 addr;
+#ifdef IPCONFIG_DYNAMIC
+ int retries = CONF_OPEN_RETRIES;
+#endif
#ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
-
- int retries = CONF_OPEN_RETRIES;
-
if (ic_dynamic() < 0) {
ic_close_devs();
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c9224310eba..52cb6939d09 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -93,13 +93,8 @@ ipt_local_out_hook(unsigned int hook,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("iptable_filter: ignoring short SOCK_RAW "
- "packet.\n");
+ ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
-
return ipt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.iptable_filter);
}
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 69f2c428714..3929d20b9e4 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -132,12 +132,8 @@ ipt_local_hook(unsigned int hook,
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr)
- || ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("iptable_mangle: ignoring short SOCK_RAW "
- "packet.\n");
+ || ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
/* Save things which could affect route */
mark = skb->mark;
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 8faebfe638f..7f65d18333e 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -65,12 +65,8 @@ ipt_local_hook(unsigned int hook,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("iptable_raw: ignoring short SOCK_RAW "
- "packet.\n");
+ ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
return ipt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.iptable_raw);
}
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 36f3be3cc42..a52a35f4a58 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -96,12 +96,8 @@ ipt_local_out_hook(unsigned int hook,
{
/* Somebody is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr)
- || ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk(KERN_INFO "iptable_security: ignoring short "
- "SOCK_RAW packet.\n");
+ || ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
return ipt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.iptable_security);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b2141e11575..4beb04fac58 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -145,11 +145,8 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
{
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("ipt_hook: happy cracking.\n");
+ ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
- }
return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1fd3ef7718b..2a8bee26f43 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -20,7 +20,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_log.h>
-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ce572f9dff0..76b148bcb0d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -522,8 +522,13 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
struct tcp_splice_state *tss = rd_desc->arg.data;
+ int ret;
- return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+ ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+ tss->flags);
+ if (ret > 0)
+ rd_desc->count -= ret;
+ return ret;
}
static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
@@ -531,6 +536,7 @@ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
/* Store TCP splice context information in read_descriptor_t. */
read_descriptor_t rd_desc = {
.arg.data = tss,
+ .count = tss->len,
};
return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -611,11 +617,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
tss.len -= ret;
spliced += ret;
+ if (!timeo)
+ break;
release_sock(sk);
lock_sock(sk);
if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
- (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current))
break;
}
@@ -2382,7 +2390,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
unsigned int seq;
__be32 delta;
unsigned int oldlen;
- unsigned int len;
+ unsigned int mss;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
@@ -2398,10 +2406,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
- int mss;
if (unlikely(type &
~(SKB_GSO_TCPV4 |
@@ -2412,7 +2423,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
!(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
goto out;
- mss = skb_shinfo(skb)->gso_size;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
@@ -2423,8 +2433,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
if (IS_ERR(segs))
goto out;
- len = skb_shinfo(skb)->gso_size;
- delta = htonl(oldlen + (thlen + len));
+ delta = htonl(oldlen + (thlen + mss));
skb = segs;
th = tcp_hdr(skb);
@@ -2440,7 +2449,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
csum_fold(csum_partial(skb_transport_header(skb),
thlen, skb->csum));
- seq += len;
+ seq += mss;
skb = skb->next;
th = tcp_hdr(skb);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cf5ab0581eb..b7faffe5c02 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
+
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
+ unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
@@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
- sk2->sk_hash == num &&
+ (bitmap || sk2->sk_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- (*saddr_comp)(sk, sk2))
- return 1;
+ (*saddr_comp)(sk, sk2)) {
+ if (bitmap)
+ __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+ bitmap);
+ else
+ return 1;
+ }
return 0;
}
@@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (!snum) {
int low, high, remaining;
unsigned rand;
- unsigned short first;
+ unsigned short first, last;
+ DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
rand = net_random();
- snum = first = rand % remaining + low;
- rand |= 1;
- for (;;) {
- hslot = &udptable->hash[udp_hashfn(net, snum)];
+ first = (((u64)rand * remaining) >> 32) + low;
+ /*
+ * force rand to be an odd multiple of UDP_HTABLE_SIZE
+ */
+ rand = (rand | 1) * UDP_HTABLE_SIZE;
+ for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+ hslot = &udptable->hash[udp_hashfn(net, first)];
+ bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
- if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
- break;
- spin_unlock_bh(&hslot->lock);
+ udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+ saddr_comp);
+
+ snum = first;
+ /*
+ * Iterate on all possible values of snum for this hash.
+ * Using steps of an odd multiple of UDP_HTABLE_SIZE
+ * give us randomization and full range coverage.
+ */
do {
- snum = snum + rand;
- } while (snum < low || snum > high);
- if (snum == first)
- goto fail;
+ if (low <= snum && snum <= high &&
+ !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+ goto found;
+ snum += rand;
+ } while (snum != first);
+ spin_unlock_bh(&hslot->lock);
}
+ goto fail;
} else {
hslot = &udptable->hash[udp_hashfn(net, snum)];
spin_lock_bh(&hslot->lock);
- if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+ if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
goto fail_unlock;
}
+found:
inet_sk(sk)->num = snum;
sk->sk_hash = snum;
if (sk_unhashed(sk)) {
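
The reworked UDP port allocation above scans one hash chain at a time: it marks every in-use port of the chain in a bitmap, then walks candidate ports starting at a random first value in steps of rand, which is forced to be an odd multiple of UDP_HTABLE_SIZE. Because the step is a multiple of UDP_HTABLE_SIZE the candidate stays in the same chain, and because the multiplier is odd the walk covers all 65536/UDP_HTABLE_SIZE ports of that chain before wrapping. A standalone user-space sketch of that coverage property (not kernel code; the constants mirror UDP_HTABLE_SIZE, which was 128 at the time):

    #include <stdio.h>

    #define UDP_HTABLE_SIZE  128
    #define PORTS_PER_CHAIN  (65536 / UDP_HTABLE_SIZE)

    int main(void)
    {
            unsigned short first = 12345;
            /* odd multiple of UDP_HTABLE_SIZE, as in the patch */
            unsigned int rand = (0xdeadbeefu | 1) * UDP_HTABLE_SIZE;
            unsigned short snum = first;
            int visited = 0;

            do {
                    visited++;      /* snum % UDP_HTABLE_SIZE never changes */
                    snum += rand;   /* 16-bit wrap, i.e. modulo 65536 */
            } while (snum != first);

            /* prints 512 == PORTS_PER_CHAIN: full coverage of the chain */
            printf("visited %d of %d ports\n", visited, PORTS_PER_CHAIN);
            return 0;
    }
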
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e92ad8455c6..f9afb452249 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
.procname = "mc_forwarding",
.data = &ipv6_devconf.mc_forwarding,
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0444,
.proc_handler = proc_dointvec,
},
#endif
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 94f74f5b0cb..c802bc1658a 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -797,6 +797,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
unsigned int nlen;
int flush = 1;
int proto;
+ __wsum csum;
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
@@ -808,6 +809,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
rcu_read_lock();
proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+ iph = ipv6_hdr(skb);
IPV6_GRO_CB(skb)->proto = proto;
ops = rcu_dereference(inet6_protos[proto]);
if (!ops || !ops->gro_receive)
@@ -839,8 +841,13 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
NAPI_GRO_CB(skb)->flush |= flush;
+ csum = skb->csum;
+ skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+
pp = ops->gro_receive(head, skb);
+ skb->csum = csum;
+
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4f433847d95..36dff880718 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
goto relookup_failed;
- if (ip6_dst_lookup(sk, &dst2, &fl))
+ if (ip6_dst_lookup(sk, &dst2, &fl2))
goto relookup_failed;
- err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
+ err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
dst_release(dst);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99e69f..52ee1dced2f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
struct fib6_walker_t *w = (void*)cb->args[2];
if (w) {
+ if (cb->args[4]) {
+ cb->args[4] = 0;
+ fib6_walker_unlink(w);
+ }
cb->args[2] = 0;
kfree(w);
}
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
read_lock_bh(&table->tb6_lock);
res = fib6_walk_continue(w);
read_unlock_bh(&table->tb6_lock);
- if (res != 0) {
- if (res < 0)
- fib6_walker_unlink(w);
- goto end;
+ if (res <= 0) {
+ fib6_walker_unlink(w);
+ cb->args[4] = 0;
}
- fib6_walker_unlink(w);
- cb->args[4] = 0;
}
-end:
+
return res;
}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 936f48946e2..f171e8dbac9 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
* IPv6 multicast router mode is now supported ;)
*/
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+ !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
* Okay, we try to forward - split and duplicate
@@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
}
if (skb2) {
- skb2->dev = skb2->dst->dev;
ip6_mr_input(skb2);
}
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3c51b2d827f..228be551e9c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -48,6 +48,7 @@
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
+#include <net/ip6_checksum.h>
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
@@ -365,7 +366,9 @@ static int pim6_rcv(struct sk_buff *skb)
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
- (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+ (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ sizeof(*pim), IPPROTO_PIM,
+ csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
@@ -392,7 +395,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
- skb->protocol = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
@@ -481,6 +484,7 @@ static int mif6_delete(struct net *net, int vifi)
{
struct mif_device *v;
struct net_device *dev;
+ struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= net->ipv6.maxvif)
return -EADDRNOTAVAIL;
@@ -513,6 +517,10 @@ static int mif6_delete(struct net *net, int vifi)
dev_set_allmulti(dev, -1);
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev)
+ in6_dev->cnf.mc_forwarding--;
+
if (v->flags & MIFF_REGISTER)
unregister_netdevice(dev);
@@ -622,6 +630,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
int vifi = vifc->mif6c_mifi;
struct mif_device *v = &net->ipv6.vif6_table[vifi];
struct net_device *dev;
+ struct inet6_dev *in6_dev;
int err;
/* Is vif busy ? */
@@ -662,6 +671,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
return -EINVAL;
}
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev)
+ in6_dev->cnf.mc_forwarding++;
+
/*
* Fill in the VIF structures
*/
@@ -838,8 +851,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
skb->dst = dst_clone(pkt->dst);
skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_pull(skb, sizeof(struct ipv6hdr));
}
if (net->ipv6.mroute6_sk == NULL) {
@@ -1222,8 +1233,10 @@ static int ip6mr_sk_init(struct sock *sk)
rtnl_lock();
write_lock_bh(&mrt_lock);
- if (likely(net->ipv6.mroute6_sk == NULL))
+ if (likely(net->ipv6.mroute6_sk == NULL)) {
net->ipv6.mroute6_sk = sk;
+ net->ipv6.devconf_all->mc_forwarding++;
+ }
else
err = -EADDRINUSE;
write_unlock_bh(&mrt_lock);
@@ -1242,6 +1255,7 @@ int ip6mr_sk_done(struct sock *sk)
if (sk == net->ipv6.mroute6_sk) {
write_lock_bh(&mrt_lock);
net->ipv6.mroute6_sk = NULL;
+ net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock);
mroute_clean_tables(net);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index bd52151d31e..c455cf4ee75 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -26,7 +26,7 @@
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c4a59824ac2..9c574235c90 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
.proto = iph->nexthdr,
};
- if (rt6_need_strict(&iph->daddr))
+ if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f8bd8df5e25..7dcbde3ea7d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1285,6 +1285,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
natt->encap_dport = n_port->sadb_x_nat_t_port_port;
}
+ memset(&natt->encap_oa, 0, sizeof(natt->encap_oa));
}
err = xfrm_init_state(x);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 5f510a13b9f..c5c0c527109 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -469,7 +469,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
struct ieee80211_sub_if_data *sdata;
u16 start_seq_num;
u8 *state;
- int ret;
+ int ret = 0;
if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
return -EINVAL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5abbc3f07dd..b9074824862 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -699,7 +699,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
return 0;
/* Setting ad-hoc mode on non-IBSS channel is not supported. */
- if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)
+ if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
+ type == NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
/*
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 929ba542fd7..1159bdb4119 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -107,6 +107,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
sta->flags = WLAN_STA_AUTHORIZED;
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+ rate_control_rate_init(sta);
return sta;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5ba721b6a39..2b890af01ba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -620,8 +620,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
if (use_short_slot != bss_conf->use_short_slot) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: switched to %s slot"
- " (BSSID=%s)\n",
+ printk(KERN_DEBUG "%s: switched to %s slot time"
+ " (BSSID=%pM)\n",
sdata->dev->name,
use_short_slot ? "short" : "long",
ifsta->bssid);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 2b3b490a607..3824990d340 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -395,13 +395,15 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
- struct minstrel_rate *mr_ctl;
+ struct ieee80211_local *local = hw_to_local(mp->hw);
+ struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
mi->lowest_rix = rate_lowest_index(sband, sta);
- mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)];
- mi->sp_ack_dur = mr_ctl->ack_time;
+ ctl_rate = &sband->bitrates[mi->lowest_rix];
+ mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+ !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
for (i = 0; i < sband->n_bitrates; i++) {
struct minstrel_rate *mr = &mi->r[n];
@@ -416,7 +418,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(mi, hw_to_local(mp->hw), mr,
+ calc_rate_durations(mi, local, mr,
&sband->bitrates[i]);
/* calculate maximum number of retransmissions before
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index dc2606d0ae7..e49a5b99cf1 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -195,7 +195,6 @@ struct sta_ampdu_mlme {
* @tx_packets: number of RX/TX MSDUs
* @tx_bytes: number of bytes transmitted to this STA
* @tx_fragments: number of transmitted MPDUs
- * @last_txrate: description of the last used transmit rate
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a4af3a124cc..4278e545638 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1307,8 +1307,10 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (is_multicast_ether_addr(hdr->addr3))
memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
else
- if (mesh_nexthop_lookup(skb, osdata))
- return 0;
+ if (mesh_nexthop_lookup(skb, osdata)) {
+ dev_put(odev);
+ return 0;
+ }
if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
fwded_frames);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7e83f74cd5d..90ce9ddb945 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -469,7 +469,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
{
- struct nf_conn *ct = NULL;
+ struct nf_conn *ct;
if (unlikely(!nf_conntrack_hash_rnd_initted)) {
get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -551,7 +551,7 @@ init_conntrack(struct net *net,
}
ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
- if (ct == NULL || IS_ERR(ct)) {
+ if (IS_ERR(ct)) {
pr_debug("Can't allocate conntrack.\n");
return (struct nf_conntrack_tuple_hash *)ct;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 00e8c27130f..c32a7e8e3a1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -831,13 +831,16 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
+ spin_unlock_bh(&nf_conntrack_lock);
nfnl_unlock();
if (request_module("nf-nat-ipv4") < 0) {
nfnl_lock();
+ spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
return -EOPNOTSUPP;
}
nfnl_lock();
+ spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
if (nfnetlink_parse_nat_setup_hook)
return -EAGAIN;
@@ -1134,7 +1137,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
struct nf_conntrack_helper *helper;
ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
- if (ct == NULL || IS_ERR(ct))
+ if (IS_ERR(ct))
return -ENOMEM;
if (!cda[CTA_TIMEOUT])
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 89837a4eef7..bfbf521f6ea 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -273,6 +273,10 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
have_rev = 1;
}
}
+
+ if (af != NFPROTO_UNSPEC && !have_rev)
+ return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
return have_rev;
}
@@ -289,6 +293,10 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
have_rev = 1;
}
}
+
+ if (af != NFPROTO_UNSPEC && !have_rev)
+ return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
return have_rev;
}
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 29375ba8db7..93acaa59d10 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -243,6 +243,17 @@ static struct xt_match xt_time_mt_reg __read_mostly = {
static int __init time_mt_init(void)
{
+ int minutes = sys_tz.tz_minuteswest;
+
+ if (minutes < 0) /* east of Greenwich */
+ printk(KERN_INFO KBUILD_MODNAME
+ ": kernel timezone is +%02d%02d\n",
+ -minutes / 60, -minutes % 60);
+ else /* west of Greenwich */
+ printk(KERN_INFO KBUILD_MODNAME
+ ": kernel timezone is -%02d%02d\n",
+ minutes / 60, minutes % 60);
+
return xt_register_match(&xt_time_mt_reg);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5f94db2f3e9..9454d4ae46d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
#endif
struct packet_type prot_hook;
spinlock_t bind_lock;
+ struct mutex pg_vec_lock;
unsigned int running:1, /* prot_hook is attached*/
auxdata:1,
origdev:1;
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
*/
spin_lock_init(&po->bind_lock);
+ mutex_init(&po->pg_vec_lock);
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
synchronize_net();
err = -EBUSY;
+ mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
if (atomic_read(&po->mapped))
printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
}
+ mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
size = vma->vm_end - vma->vm_start;
- lock_sock(sk);
+ mutex_lock(&po->pg_vec_lock);
if (po->pg_vec == NULL)
goto out;
if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
err = 0;
out:
- release_sock(sk);
+ mutex_unlock(&po->pg_vec_lock);
return err;
}
#endif
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5070643ce53..2f0f0b04d3f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -661,12 +661,13 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
* next pending event (0 for no event in pq).
* Note: Applied are events whose have cl->pq_key <= q->now.
*/
-static psched_time_t htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level,
+ unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
1 to simplify things when jiffy is going to be incremented
too soon */
- unsigned long stop_at = jiffies + 2;
+ unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
long diff;
@@ -685,8 +686,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
- /* too much load - let's continue on next jiffie */
- return q->now + PSCHED_TICKS_PER_SEC / HZ;
+ /* too much load - let's continue on next jiffie (including above) */
+ return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
}
/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -845,6 +846,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
+ unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (!sch->q.qlen)
goto fin;
q->now = psched_get_time();
+ start_at = jiffies;
next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
@@ -866,14 +869,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
psched_time_t event;
if (q->now >= q->near_ev_cache[level]) {
- event = htb_do_events(q, level);
+ event = htb_do_events(q, level, start_at);
if (!event)
event = q->now + PSCHED_TICKS_PER_SEC;
q->near_ev_cache[level] = event;
} else
event = q->near_ev_cache[level];
- if (event && next_event > event)
+ if (next_event > event)
next_event = event;
m = ~q->row_mask[level];
diff --git a/net/sctp/input.c b/net/sctp/input.c
index bf612d954d4..2e4a8646dbc 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -249,6 +249,19 @@ int sctp_rcv(struct sk_buff *skb)
*/
sctp_bh_lock_sock(sk);
+ if (sk != rcvr->sk) {
+ /* Our cached sk is different from the rcvr->sk. This is
+ * because migrate()/accept() may have moved the association
+ * to a new socket and released all the sockets. So now we
+ * are holding a lock on the old socket while the user may
+ * be doing something with the new socket. Switch our veiw
+ * of the current sk.
+ */
+ sctp_bh_unlock_sock(sk);
+ sk = rcvr->sk;
+ sctp_bh_lock_sock(sk);
+ }
+
if (sock_owned_by_user(sk)) {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
sctp_add_backlog(sk, skb);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index c3f417f7ec6..73639355157 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -324,14 +324,16 @@ append:
switch (chunk->chunk_hdr->type) {
case SCTP_CID_DATA:
retval = sctp_packet_append_data(packet, chunk);
+ if (SCTP_XMIT_OK != retval)
+ goto finish;
/* Disallow SACK bundling after DATA. */
packet->has_sack = 1;
/* Disallow AUTH bundling after DATA */
packet->has_auth = 1;
/* Let it be knows that packet has DATA in it */
packet->has_data = 1;
- if (SCTP_XMIT_OK != retval)
- goto finish;
+ /* timestamp the chunk for rtx purposes */
+ chunk->sent_at = jiffies;
break;
case SCTP_CID_COOKIE_ECHO:
packet->has_cookie_echo = 1;
@@ -470,7 +472,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
} else
chunk->resent = 1;
- chunk->sent_at = jiffies;
has_data = 1;
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 247ebc95c1e..bc411c89621 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -929,7 +929,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
}
/* Finally, transmit new packets. */
- start_timer = 0;
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
@@ -1028,7 +1027,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
list_add_tail(&chunk->transmitted_list,
&transport->transmitted);
- sctp_transport_reset_timers(transport, start_timer-1);
+ sctp_transport_reset_timers(transport, 0);
q->empty = 0;
diff --git a/net/socket.c b/net/socket.c
index 06603d73c41..35dd7371752 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1214,7 +1214,7 @@ int sock_create_kern(int family, int type, int protocol, struct socket **res)
return __sock_create(&init_net, family, type, protocol, res, 1);
}
-asmlinkage long sys_socket(int family, int type, int protocol)
+SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
int retval;
struct socket *sock;
@@ -1255,8 +1255,8 @@ out_release:
* Create a pair of connected sockets.
*/
-asmlinkage long sys_socketpair(int family, int type, int protocol,
- int __user *usockvec)
+SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ int __user *, usockvec)
{
struct socket *sock1, *sock2;
int fd1, fd2, err;
@@ -1356,7 +1356,7 @@ out_fd1:
* the protocol layer (having also checked the address is ok).
*/
-asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
+SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1385,7 +1385,7 @@ asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
* ready for listening.
*/
-asmlinkage long sys_listen(int fd, int backlog)
+SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
struct socket *sock;
int err, fput_needed;
@@ -1418,8 +1418,8 @@ asmlinkage long sys_listen(int fd, int backlog)
* clean when we restucture accept also.
*/
-asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags)
+SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen, int, flags)
{
struct socket *sock, *newsock;
struct file *newfile;
@@ -1502,8 +1502,8 @@ out_fd:
goto out_put;
}
-asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen)
+SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen)
{
return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}
@@ -1520,8 +1520,8 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
* include the -EINPROGRESS status for such sockets.
*/
-asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr,
- int addrlen)
+SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ int, addrlen)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1552,8 +1552,8 @@ out:
* name to user space.
*/
-asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len)
+SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1583,8 +1583,8 @@ out:
* name to user space.
*/
-asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len)
+SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1615,9 +1615,9 @@ asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
* the protocol.
*/
-asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
- unsigned flags, struct sockaddr __user *addr,
- int addr_len)
+SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int, addr_len)
{
struct socket *sock;
struct sockaddr_storage address;
@@ -1660,7 +1660,8 @@ out:
* Send a datagram down a socket.
*/
-asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
+SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags)
{
return sys_sendto(fd, buff, len, flags, NULL, 0);
}
@@ -1671,9 +1672,9 @@ asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
* sender address from kernel to user space.
*/
-asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
- unsigned flags, struct sockaddr __user *addr,
- int __user *addr_len)
+SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int __user *, addr_len)
{
struct socket *sock;
struct iovec iov;
@@ -1725,8 +1726,8 @@ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
* to pass the user mode parameter for the protocols to sort out.
*/
-asmlinkage long sys_setsockopt(int fd, int level, int optname,
- char __user *optval, int optlen)
+SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int, optlen)
{
int err, fput_needed;
struct socket *sock;
@@ -1759,8 +1760,8 @@ out_put:
* to pass a user mode parameter for the protocols to sort out.
*/
-asmlinkage long sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen)
+SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int __user *, optlen)
{
int err, fput_needed;
struct socket *sock;
@@ -1789,7 +1790,7 @@ out_put:
* Shutdown a socket.
*/
-asmlinkage long sys_shutdown(int fd, int how)
+SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
int err, fput_needed;
struct socket *sock;
@@ -1815,7 +1816,7 @@ asmlinkage long sys_shutdown(int fd, int how)
* BSD sendmsg interface
*/
-asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -1921,8 +1922,8 @@ out:
* BSD recvmsg interface
*/
-asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg,
- unsigned int flags)
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -2045,7 +2046,7 @@ static const unsigned char nargs[19]={
* it is set by the callees.
*/
-asmlinkage long sys_socketcall(int call, unsigned long __user *args)
+SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
unsigned long a[6];
unsigned long a0, a1;
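
The net/socket.c hunks above convert the open-coded asmlinkage long sys_*() definitions to the SYSCALL_DEFINEn() macros, which take the syscall name followed by alternating argument types and names. A rough, simplified sketch of the shape of that expansion in the common case (hypothetical macro; the real ones live in include/linux/syscalls.h and can additionally emit per-architecture wrappers, e.g. to sign-extend 32-bit arguments on 64-bit machines):

    /* Illustration only -- not the kernel's actual macro. */
    #define asmlinkage              /* arch-specific linkage, elided here */
    #define SKETCH_SYSCALL_DEFINE2(name, t1, a1, t2, a2) \
            asmlinkage long sys_##name(t1 a1, t2 a2)

    SKETCH_SYSCALL_DEFINE2(listen, int, fd, int, backlog)
    {
            (void)fd;
            (void)backlog;
            return 0;       /* real body is unchanged by the conversion */
    }
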
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
new file mode 100644
index 00000000000..dcef600d0bf
--- /dev/null
+++ b/net/sunrpc/Kconfig
@@ -0,0 +1,78 @@
+config SUNRPC
+ tristate
+
+config SUNRPC_GSS
+ tristate
+
+config SUNRPC_XPRT_RDMA
+ tristate
+ depends on SUNRPC && INFINIBAND && EXPERIMENTAL
+ default SUNRPC && INFINIBAND
+ help
+ This option allows the NFS client and server to support
+ an RDMA-enabled transport.
+
+ To compile RPC client RDMA transport support as a module,
+ choose M here: the module will be called xprtrdma.
+
+ If unsure, say N.
+
+config SUNRPC_REGISTER_V4
+ bool "Register local RPC services via rpcbind v4 (EXPERIMENTAL)"
+ depends on SUNRPC && EXPERIMENTAL
+ default n
+ help
+ Sun added support for registering RPC services at an IPv6
+ address by creating two new versions of the rpcbind protocol
+ (RFC 1833).
+
+ This option enables support in the kernel RPC server for
+ registering kernel RPC services via version 4 of the rpcbind
+ protocol. If you enable this option, you must run a portmapper
+ daemon that supports rpcbind protocol version 4.
+
+ Serving NFS over IPv6 from knfsd (the kernel's NFS server)
+ requires that you enable this option and use a portmapper that
+ supports rpcbind version 4.
+
+ If unsure, say N to get traditional behavior (register kernel
+ RPC services using only rpcbind version 2). Distributions
+ using the legacy Linux portmapper daemon must say N here.
+
+config RPCSEC_GSS_KRB5
+ tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
+ depends on SUNRPC && EXPERIMENTAL
+ select SUNRPC_GSS
+ select CRYPTO
+ select CRYPTO_MD5
+ select CRYPTO_DES
+ select CRYPTO_CBC
+ help
+ Choose Y here to enable Secure RPC using the Kerberos version 5
+ GSS-API mechanism (RFC 1964).
+
+ Secure RPC calls with Kerberos require an auxiliary user-space
+ daemon which may be found in the Linux nfs-utils package
+ available from http://linux-nfs.org/. In addition, user-space
+ Kerberos support should be installed.
+
+ If unsure, say N.
+
+config RPCSEC_GSS_SPKM3
+ tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
+ depends on SUNRPC && EXPERIMENTAL
+ select SUNRPC_GSS
+ select CRYPTO
+ select CRYPTO_MD5
+ select CRYPTO_DES
+ select CRYPTO_CAST5
+ select CRYPTO_CBC
+ help
+ Choose Y here to enable Secure RPC using the SPKM3 public key
+ GSS-API mechanism (RFC 2025).
+
+ Secure RPC calls with SPKM3 require an auxiliary userspace
+ daemon which may be found in the Linux nfs-utils package
+ available from http://linux-nfs.org/.
+
+ If unsure, say N.
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5aa024b99c5..2f2d731bc1c 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -124,7 +124,7 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff)
{
- int stop = sizeof(nm_a->map) / sizeof(u32);
+ int stop = ARRAY_SIZE(nm_a->map);
int w;
int b;
u32 map;
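
The ARRAY_SIZE() conversion above drops the hand-written sizeof division. In the kernel the macro is defined roughly as follows, with a compile-time check that the argument is a real array rather than a pointer that happens to be passed in:

	/* Approximate kernel definition of ARRAY_SIZE(); __must_be_array()
	 * evaluates to 0 but breaks the build if arr decays to a pointer. */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
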
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
index 87cf4430079..94d216a4640 100644
--- a/net/wimax/debugfs.c
+++ b/net/wimax/debugfs.c
@@ -28,17 +28,6 @@
#include "debug-levels.h"
-/* Debug framework control of debug levels */
-struct d_level D_LEVEL[] = {
- D_SUBMODULE_DEFINE(debugfs),
- D_SUBMODULE_DEFINE(id_table),
- D_SUBMODULE_DEFINE(op_msg),
- D_SUBMODULE_DEFINE(op_reset),
- D_SUBMODULE_DEFINE(op_rfkill),
- D_SUBMODULE_DEFINE(stack),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index d4da92f8981..3869c032788 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
}
EXPORT_SYMBOL_GPL(wimax_dev_rm);
+
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(id_table),
+ D_SUBMODULE_DEFINE(op_msg),
+ D_SUBMODULE_DEFINE(op_reset),
+ D_SUBMODULE_DEFINE(op_rfkill),
+ D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
struct genl_family wimax_gnl_family = {
.id = GENL_ID_GENERATE,
.name = "WiMAX",
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4f877535e66..85c9034c59b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -421,6 +421,31 @@ static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range,
return 0;
}
+/**
+ * freq_in_rule_band - tells us if a frequency is in a frequency band
+ * @freq_range: frequency rule we want to query
+ * @freq_khz: frequency we are inquiring about
+ *
+ * This lets us know if a specific frequency rule is or is not relevant to
+ * a specific frequency's band. Bands are device specific and artificial
+ * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
+ * safe for now to assume that a frequency rule should not be part of a
+ * frequency's band if the start freq or end freq are off by more than 2 GHz.
+ * This resolution can be lowered and should be considered as we add
+ * regulatory rule support for other "bands".
+ **/
+static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
+ u32 freq_khz)
+{
+#define ONE_GHZ_IN_KHZ 1000000
+ if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+ return true;
+ if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+ return true;
+ return false;
+#undef ONE_GHZ_IN_KHZ
+}
+
/* Converts a country IE to a regulatory domain. A regulatory domain
* structure has a lot of information which the IE doesn't yet have,
* so for the other values we use upper max values as we will intersect
@@ -473,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
* calculate the number of reg rules we will need. We will need one
* for each channel subband */
while (country_ie_len >= 3) {
+ int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie;
int cur_sub_max_channel = 0, cur_channel = 0;
@@ -484,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
+ /* 2 GHz */
+ if (triplet->chans.first_channel <= 14)
+ end_channel = triplet->chans.first_channel +
+ triplet->chans.num_channels;
+ else
+ /*
+ * 5 GHz -- For example in country IEs if the first
+ * channel given is 36 and the number of channels is 4
+ * then the individual channel numbers defined for the
+ * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
+ * and not 36, 37, 38, 39.
+ *
+ * See: http://tinyurl.com/11d-clarification
+ */
+ end_channel = triplet->chans.first_channel +
+ (4 * (triplet->chans.num_channels - 1));
+
cur_channel = triplet->chans.first_channel;
- cur_sub_max_channel = ieee80211_channel_to_frequency(
- cur_channel + triplet->chans.num_channels);
+ cur_sub_max_channel = end_channel;
/* Basic sanity check */
if (cur_sub_max_channel < cur_channel)
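
To make the 5 GHz expansion above concrete, here is a small worked example with hypothetical triplet values (not taken from the patch): first_channel = 36 and num_channels = 4 describe channels 36, 40, 44 and 48, so the last covered channel works out as:

	/* Hypothetical country IE triplet values, for illustration only */
	int first_channel = 36;
	int num_channels  = 4;
	int end_channel   = first_channel + 4 * (num_channels - 1);	/* 36 + 12 = 48 */
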
@@ -538,6 +580,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
/* This time around we fill in the rd */
while (country_ie_len >= 3) {
+ int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie;
struct ieee80211_reg_rule *reg_rule = NULL;
@@ -559,6 +602,14 @@ static struct ieee80211_regdomain *country_ie_2_rd(
reg_rule->flags = flags;
+ /* 2 GHz */
+ if (triplet->chans.first_channel <= 14)
+ end_channel = triplet->chans.first_channel +
+ triplet->chans.num_channels;
+ else
+ end_channel = triplet->chans.first_channel +
+ (4 * (triplet->chans.num_channels - 1));
+
/* The +10 is since the regulatory domain expects
* the actual band edge, not the center of freq for
* its start and end freqs, assuming 20 MHz bandwidth on
@@ -568,8 +619,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
triplet->chans.first_channel) - 10);
freq_range->end_freq_khz =
MHZ_TO_KHZ(ieee80211_channel_to_frequency(
- triplet->chans.first_channel +
- triplet->chans.num_channels) + 10);
+ end_channel) + 10);
/* Large arbitrary values, we intersect later */
/* Increment this if we ever support >= 40 MHz channels
@@ -748,12 +798,23 @@ static u32 map_regdom_flags(u32 rd_flags)
* this value to the maximum allowed bandwidth.
* @reg_rule: the regulatory rule which we have for this frequency
*
- * Use this function to get the regulatory rule for a specific frequency.
+ * Use this function to get the regulatory rule for a specific frequency on
+ * a given wireless device. If the device has a specific regulatory domain
+ * it wants to follow, we respect that unless a country IE has been received
+ * and processed already.
+ *
+ * Returns 0 if it was able to find a valid regulatory rule which does
+ * apply to the given center_freq; otherwise it returns non-zero. It will
+ * also return -ERANGE if we determine the given center_freq does not even have
+ * a regulatory rule for a frequency range in the center_freq's band. See
+ * freq_in_rule_band() for our current definition of a band -- this is purely
+ * subjective and right now it's 802.11 specific.
*/
static int freq_reg_info(u32 center_freq, u32 *bandwidth,
const struct ieee80211_reg_rule **reg_rule)
{
int i;
+ bool band_rule_found = false;
u32 max_bandwidth = 0;
if (!cfg80211_regdomain)
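
A minimal sketch of the calling convention documented above, mirroring how handle_channel() further down uses freq_reg_info() (the locals here are illustrative and not part of the patch):

	u32 max_bandwidth = 0;
	const struct ieee80211_reg_rule *reg_rule = NULL;
	int r;

	r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq),
			  &max_bandwidth, &reg_rule);
	if (r == -ERANGE) {
		/* No rule anywhere in this frequency's band -- with a
		 * country IE this may just mean the IE was partial. */
	} else if (r) {
		/* The band has rules, but none matched this center_freq. */
	} else {
		/* reg_rule and max_bandwidth are now valid for the channel. */
	}
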
@@ -767,7 +828,15 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth,
rr = &cfg80211_regdomain->reg_rules[i];
fr = &rr->freq_range;
pr = &rr->power_rule;
+
+ /* We only need to know if one frequency rule was
+ * in center_freq's band, that's enough, so let's
+ * not overwrite it once found */
+ if (!band_rule_found)
+ band_rule_found = freq_in_rule_band(fr, center_freq);
+
max_bandwidth = freq_max_bandwidth(fr, center_freq);
+
if (max_bandwidth && *bandwidth <= max_bandwidth) {
*reg_rule = rr;
*bandwidth = max_bandwidth;
@@ -775,23 +844,64 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth,
}
}
+ if (!band_rule_found)
+ return -ERANGE;
+
return !max_bandwidth;
}
-static void handle_channel(struct ieee80211_channel *chan)
+static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
+ unsigned int chan_idx)
{
int r;
- u32 flags = chan->orig_flags;
+ u32 flags;
u32 max_bandwidth = 0;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+
+ sband = wiphy->bands[band];
+ BUG_ON(chan_idx >= sband->n_channels);
+ chan = &sband->channels[chan_idx];
+
+ flags = chan->orig_flags;
r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq),
&max_bandwidth, &reg_rule);
if (r) {
- flags |= IEEE80211_CHAN_DISABLED;
- chan->flags = flags;
+ /* This means no regulatory rule was found in the country IE
+ * with a frequency range on the center_freq's band. Since
+ * IEEE-802.11 allows a country IE to carry only a subset of the
+ * regulatory information for a country, we do not disable the
+ * channel unless at least one reg rule was found on the
+ * center_freq's band. For details see this clarification:
+ *
+ * http://tinyurl.com/11d-clarification
+ */
+ if (r == -ERANGE &&
+ last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) {
+#ifdef CONFIG_CFG80211_REG_DEBUG
+ printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
+ "intact on %s - no rule found in band on "
+ "Country IE\n",
+ chan->center_freq, wiphy_name(wiphy));
+#endif
+ } else {
+ /* In this case we know the country IE has at least one reg rule
+ * for the band, so we respect its band definitions */
+#ifdef CONFIG_CFG80211_REG_DEBUG
+ if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
+ printk(KERN_DEBUG "cfg80211: Disabling "
+ "channel %d MHz on %s due to "
+ "Country IE\n",
+ chan->center_freq, wiphy_name(wiphy));
+#endif
+ flags |= IEEE80211_CHAN_DISABLED;
+ chan->flags = flags;
+ }
return;
}
@@ -808,12 +918,16 @@ static void handle_channel(struct ieee80211_channel *chan)
chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
-static void handle_band(struct ieee80211_supported_band *sband)
+static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
{
- int i;
+ unsigned int i;
+ struct ieee80211_supported_band *sband;
+
+ BUG_ON(!wiphy->bands[band]);
+ sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++)
- handle_channel(&sband->channels[i]);
+ handle_channel(wiphy, band, i);
}
static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby)
@@ -840,7 +954,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby)
enum ieee80211_band band;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
- handle_band(wiphy->bands[band]);
+ handle_band(wiphy, band);
if (wiphy->reg_notifier)
wiphy->reg_notifier(wiphy, setby);
}
@@ -1170,7 +1284,7 @@ static void reg_country_ie_process_debug(
if (intersected_rd) {
printk(KERN_DEBUG "cfg80211: We intersect both of these "
"and get:\n");
- print_regdomain_info(rd);
+ print_regdomain_info(intersected_rd);
return;
}
printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index c609a4b98e1..42cd18391f4 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -63,7 +63,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (len > skb_tailroom(skb))
len = skb_tailroom(skb);
- skb->truesize += len;
__skb_put(skb, len);
len += plen;