Diffstat (limited to 'net')
600 files changed, 21113 insertions, 13713 deletions
diff --git a/net/802/Makefile b/net/802/Makefile index 7893d679910..a30d6e385ae 100644 --- a/net/802/Makefile +++ b/net/802/Makefile @@ -4,7 +4,6 @@ # Check the p8022 selections against net/core/Makefile. obj-$(CONFIG_LLC) += p8022.o psnap.o -obj-$(CONFIG_TR) += p8022.o psnap.o tr.o obj-$(CONFIG_NET_FC) += fc.o obj-$(CONFIG_FDDI) += fddi.o obj-$(CONFIG_HIPPI) += hippi.o diff --git a/net/802/fc.c b/net/802/fc.c index b324e31401a..05eea6b98bb 100644 --- a/net/802/fc.c +++ b/net/802/fc.c @@ -35,7 +35,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct fch_hdr *fch; int hdr_len; diff --git a/net/802/fddi.c b/net/802/fddi.c index 5ab25cd4314..9cda40661e0 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -51,7 +51,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { int hl = FDDI_K_SNAP_HLEN; struct fddihdr *fddi; diff --git a/net/802/garp.c b/net/802/garp.c index a5c22483043..8456f5d98b8 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -157,9 +157,9 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, while (parent) { attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); - if (d < 0) + if (d > 0) parent = parent->rb_left; - else if (d > 0) + else if (d < 0) parent = parent->rb_right; else return attr; @@ -178,9 +178,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app, parent = *p; attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); - if (d < 0) + if (d > 0) p = &parent->rb_left; - else if (d > 0) + else if (d < 0) p = &parent->rb_right; else { /* The attribute already exists; re-use it. 
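[Editor's aside: the sign swap in garp_attr_lookup()/garp_attr_create() above is easy to misread, so here is a minimal userspace sketch of the same descend logic, not the kernel rbtree API. Because garp_attr_cmp(attr, data, ...) compares the existing tree node against the search key, a positive result means the node sorts after the key and the walk must go left. All names below (node_cmp, lookup) are hypothetical.

#include <stdio.h>
#include <string.h>

struct node {
	const char *key;
	struct node *left, *right;
};

/* Mirrors garp_attr_cmp(): compares the existing node against the key. */
static int node_cmp(const struct node *n, const char *key)
{
	return strcmp(n->key, key);
}

static struct node *lookup(struct node *root, const char *key)
{
	while (root) {
		int d = node_cmp(root, key);

		if (d > 0)		/* node > key: key lies in the left subtree */
			root = root->left;
		else if (d < 0)		/* node < key: key lies in the right subtree */
			root = root->right;
		else
			return root;
	}
	return NULL;
}

int main(void)
{
	struct node a = { "a", NULL, NULL };
	struct node c = { "c", NULL, NULL };
	struct node b = { "b", &a, &c };

	printf("found: %s\n", lookup(&b, "c") ? "yes" : "no");
	return 0;
}

The pre-fix code had these branches reversed, so lookups descended into the wrong subtree whenever the comparison was non-zero.]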
*/ diff --git a/net/802/hippi.c b/net/802/hippi.c index 056794e6637..51a1f530417 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -45,7 +45,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; diff --git a/net/802/p8022.c b/net/802/p8022.c index 7f353c4f437..0bda8de7df5 100644 --- a/net/802/p8022.c +++ b/net/802/p8022.c @@ -1,6 +1,5 @@ /* - * NET3: Support for 802.2 demultiplexing off Ethernet (Token ring - * is kept separate see p8022tr.c) + * NET3: Support for 802.2 demultiplexing off Ethernet * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version diff --git a/net/802/stp.c b/net/802/stp.c index 15540b7323c..2c40ba0ec11 100644 --- a/net/802/stp.c +++ b/net/802/stp.c @@ -46,7 +46,7 @@ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev, proto = rcu_dereference(garp_protos[eh->h_dest[5] - GARP_ADDR_MIN]); if (proto && - compare_ether_addr(eh->h_dest, proto->group_address)) + !ether_addr_equal(eh->h_dest, proto->group_address)) goto err; } else proto = rcu_dereference(stp_proto); diff --git a/net/802/tr.c b/net/802/tr.c deleted file mode 100644 index b9a3a145e34..00000000000 --- a/net/802/tr.c +++ /dev/null @@ -1,676 +0,0 @@ -/* - * NET3: Token ring device handling subroutines - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Fixes: 3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes. - * Added rif table to /proc/net/tr_rif and rif timeout to - * /proc/sys/net/token-ring/rif_timeout. - * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged - * tr_header and tr_type_trans to handle passing IPX SNAP and - * 802.2 through the correct layers. Eliminated tr_reformat. - * - */ - -#include <asm/uaccess.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/jiffies.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/inet.h> -#include <linux/netdevice.h> -#include <linux/trdevice.h> -#include <linux/skbuff.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/net.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/sysctl.h> -#include <linux/slab.h> -#include <net/arp.h> -#include <net/net_namespace.h> - -static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev); -static void rif_check_expire(unsigned long dummy); - -#define TR_SR_DEBUG 0 - -/* - * Each RIF entry we learn is kept this way - */ - -struct rif_cache { - unsigned char addr[TR_ALEN]; - int iface; - __be16 rcf; - __be16 rseg[8]; - struct rif_cache *next; - unsigned long last_used; - unsigned char local_ring; -}; - -#define RIF_TABLE_SIZE 32 - -/* - * We hash the RIF cache 32 ways. We do after all have to look it - * up a lot. - */ - -static struct rif_cache *rif_table[RIF_TABLE_SIZE]; - -static DEFINE_SPINLOCK(rif_lock); - - -/* - * Garbage disposal timer. 
- */ - -static struct timer_list rif_timer; - -static int sysctl_tr_rif_timeout = 60*10*HZ; - -static inline unsigned long rif_hash(const unsigned char *addr) -{ - unsigned long x; - - x = addr[0]; - x = (x << 2) ^ addr[1]; - x = (x << 2) ^ addr[2]; - x = (x << 2) ^ addr[3]; - x = (x << 2) ^ addr[4]; - x = (x << 2) ^ addr[5]; - - x ^= x >> 8; - - return x & (RIF_TABLE_SIZE - 1); -} - -/* - * Put the headers on a token ring packet. Token ring source routing - * makes this a little more exciting than on ethernet. - */ - -static int tr_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, - const void *daddr, const void *saddr, unsigned len) -{ - struct trh_hdr *trh; - int hdr_len; - - /* - * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls - * dev->hard_header directly. - */ - if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP) - { - struct trllc *trllc; - - hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc); - trh = (struct trh_hdr *)skb_push(skb, hdr_len); - trllc = (struct trllc *)(trh+1); - trllc->dsap = trllc->ssap = EXTENDED_SAP; - trllc->llc = UI_CMD; - trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00; - trllc->ethertype = htons(type); - } - else - { - hdr_len = sizeof(struct trh_hdr); - trh = (struct trh_hdr *)skb_push(skb, hdr_len); - } - - trh->ac=AC; - trh->fc=LLC_FRAME; - - if(saddr) - memcpy(trh->saddr,saddr,dev->addr_len); - else - memcpy(trh->saddr,dev->dev_addr,dev->addr_len); - - /* - * Build the destination and then source route the frame - */ - - if(daddr) - { - memcpy(trh->daddr,daddr,dev->addr_len); - tr_source_route(skb, trh, dev); - return hdr_len; - } - - return -hdr_len; -} - -/* - * A neighbour discovery of some species (eg arp) has completed. We - * can now send the packet. - */ - -static int tr_rebuild_header(struct sk_buff *skb) -{ - struct trh_hdr *trh=(struct trh_hdr *)skb->data; - struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); - struct net_device *dev = skb->dev; - - /* - * FIXME: We don't yet support IPv6 over token rings - */ - - if(trllc->ethertype != htons(ETH_P_IP)) { - printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype)); - return 0; - } - -#ifdef CONFIG_INET - if(arp_find(trh->daddr, skb)) { - return 1; - } - else -#endif - { - tr_source_route(skb,trh,dev); - return 0; - } -} - -/* - * Some of this is a bit hackish. We intercept RIF information - * used for source routing. We also grab IP directly and don't feed - * it via SNAP. 
- */ - -__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) -{ - - struct trh_hdr *trh; - struct trllc *trllc; - unsigned riflen=0; - - skb->dev = dev; - skb_reset_mac_header(skb); - trh = tr_hdr(skb); - - if(trh->saddr[0] & TR_RII) - riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; - - trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); - - skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); - - if(*trh->daddr & 0x80) - { - if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN)) - skb->pkt_type=PACKET_BROADCAST; - else - skb->pkt_type=PACKET_MULTICAST; - } - else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E)) - { - skb->pkt_type=PACKET_MULTICAST; - } - else if(dev->flags & IFF_PROMISC) - { - if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN)) - skb->pkt_type=PACKET_OTHERHOST; - } - - if ((skb->pkt_type != PACKET_BROADCAST) && - (skb->pkt_type != PACKET_MULTICAST)) - tr_add_rif_info(trh,dev) ; - - /* - * Strip the SNAP header from ARP packets since we don't - * pass them through to the 802.2/SNAP layers. - */ - - if (trllc->dsap == EXTENDED_SAP && - (trllc->ethertype == htons(ETH_P_IP) || - trllc->ethertype == htons(ETH_P_IPV6) || - trllc->ethertype == htons(ETH_P_ARP))) - { - skb_pull(skb, sizeof(struct trllc)); - return trllc->ethertype; - } - - return htons(ETH_P_TR_802_2); -} - -/* - * We try to do source routing... - */ - -void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh, - struct net_device *dev) -{ - int slack; - unsigned int hash; - struct rif_cache *entry; - unsigned char *olddata; - unsigned long flags; - static const unsigned char mcast_func_addr[] - = {0xC0,0x00,0x00,0x04,0x00,0x00}; - - spin_lock_irqsave(&rif_lock, flags); - - /* - * Broadcasts are single route as stated in RFC 1042 - */ - if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) || - (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) ) - { - trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) - | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); - trh->saddr[0]|=TR_RII; - } - else - { - hash = rif_hash(trh->daddr); - /* - * Walk the hash table and look for an entry - */ - for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next); - - /* - * If we found an entry we can route the frame. - */ - if(entry) - { -#if TR_SR_DEBUG -printk("source routing for %pM\n", trh->daddr); -#endif - if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) - { - trh->rcf=entry->rcf; - memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short)); - trh->rcf^=htons(TR_RCF_DIR_BIT); - trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */ - - trh->saddr[0]|=TR_RII; -#if TR_SR_DEBUG - printk("entry found with rcf %04x\n", entry->rcf); - } - else - { - printk("entry found but without rcf length, local=%02x\n", entry->local_ring); -#endif - } - entry->last_used=jiffies; - } - else - { - /* - * Without the information we simply have to shout - * on the wire. The replies should rapidly clean this - * situation up. 
- */ - trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) - | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); - trh->saddr[0]|=TR_RII; -#if TR_SR_DEBUG - printk("no entry in rif table found - broadcasting frame\n"); -#endif - } - } - - /* Compress the RIF here so we don't have to do it in the driver(s) */ - if (!(trh->saddr[0] & 0x80)) - slack = 18; - else - slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8); - olddata = skb->data; - spin_unlock_irqrestore(&rif_lock, flags); - - skb_pull(skb, slack); - memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack); -} - -/* - * We have learned some new RIF information for our source - * routing. - */ - -static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) -{ - unsigned int hash, rii_p = 0; - unsigned long flags; - struct rif_cache *entry; - unsigned char saddr0; - - spin_lock_irqsave(&rif_lock, flags); - saddr0 = trh->saddr[0]; - - /* - * Firstly see if the entry exists - */ - - if(trh->saddr[0] & TR_RII) - { - trh->saddr[0]&=0x7f; - if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2) - { - rii_p = 1; - } - } - - hash = rif_hash(trh->saddr); - for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next); - - if(entry==NULL) - { -#if TR_SR_DEBUG - printk("adding rif_entry: addr:%pM rcf:%04X\n", - trh->saddr, ntohs(trh->rcf)); -#endif - /* - * Allocate our new entry. A failure to allocate loses - * use the information. This is harmless. - * - * FIXME: We ought to keep some kind of cache size - * limiting and adjust the timers to suit. - */ - entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC); - - if(!entry) - { - printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n"); - spin_unlock_irqrestore(&rif_lock, flags); - return; - } - - memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN); - entry->iface = dev->ifindex; - entry->next=rif_table[hash]; - entry->last_used=jiffies; - rif_table[hash]=entry; - - if (rii_p) - { - entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); - memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); - entry->local_ring = 0; - } - else - { - entry->local_ring = 1; - } - } - else /* Y. Tahara added */ - { - /* - * Update existing entries - */ - if (!entry->local_ring) - if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) && - !(trh->rcf & htons(TR_RCF_BROADCAST_MASK))) - { -#if TR_SR_DEBUG -printk("updating rif_entry: addr:%pM rcf:%04X\n", - trh->saddr, ntohs(trh->rcf)); -#endif - entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); - memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); - } - entry->last_used=jiffies; - } - trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */ - spin_unlock_irqrestore(&rif_lock, flags); -} - -/* - * Scan the cache with a timer and see what we need to throw out. 
- */ - -static void rif_check_expire(unsigned long dummy) -{ - int i; - unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2; - - spin_lock_irqsave(&rif_lock, flags); - - for(i =0; i < RIF_TABLE_SIZE; i++) { - struct rif_cache *entry, **pentry; - - pentry = rif_table+i; - while((entry=*pentry) != NULL) { - unsigned long expires - = entry->last_used + sysctl_tr_rif_timeout; - - if (time_before_eq(expires, jiffies)) { - *pentry = entry->next; - kfree(entry); - } else { - pentry = &entry->next; - - if (time_before(expires, next_interval)) - next_interval = expires; - } - } - } - - spin_unlock_irqrestore(&rif_lock, flags); - - mod_timer(&rif_timer, next_interval); - -} - -/* - * Generate the /proc/net information for the token ring RIF - * routing. - */ - -#ifdef CONFIG_PROC_FS - -static struct rif_cache *rif_get_idx(loff_t pos) -{ - int i; - struct rif_cache *entry; - loff_t off = 0; - - for(i = 0; i < RIF_TABLE_SIZE; i++) - for(entry = rif_table[i]; entry; entry = entry->next) { - if (off == pos) - return entry; - ++off; - } - - return NULL; -} - -static void *rif_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(&rif_lock) -{ - spin_lock_irq(&rif_lock); - - return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN; -} - -static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - int i; - struct rif_cache *ent = v; - - ++*pos; - - if (v == SEQ_START_TOKEN) { - i = -1; - goto scan; - } - - if (ent->next) - return ent->next; - - i = rif_hash(ent->addr); - scan: - while (++i < RIF_TABLE_SIZE) { - if ((ent = rif_table[i]) != NULL) - return ent; - } - return NULL; -} - -static void rif_seq_stop(struct seq_file *seq, void *v) - __releases(&rif_lock) -{ - spin_unlock_irq(&rif_lock); -} - -static int rif_seq_show(struct seq_file *seq, void *v) -{ - int j, rcf_len, segment, brdgnmb; - struct rif_cache *entry = v; - - if (v == SEQ_START_TOKEN) - seq_puts(seq, - "if TR address TTL rcf routing segments\n"); - else { - struct net_device *dev = dev_get_by_index(&init_net, entry->iface); - long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout) - - (long) jiffies; - - seq_printf(seq, "%s %pM %7li ", - dev?dev->name:"?", - entry->addr, - ttl/HZ); - - if (entry->local_ring) - seq_puts(seq, "local\n"); - else { - - seq_printf(seq, "%04X", ntohs(entry->rcf)); - rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2; - if (rcf_len) - rcf_len >>= 1; - for(j = 1; j < rcf_len; j++) { - if(j==1) { - segment=ntohs(entry->rseg[j-1])>>4; - seq_printf(seq," %03X",segment); - } - - segment=ntohs(entry->rseg[j])>>4; - brdgnmb=ntohs(entry->rseg[j-1])&0x00f; - seq_printf(seq,"-%01X-%03X",brdgnmb,segment); - } - seq_putc(seq, '\n'); - } - - if (dev) - dev_put(dev); - } - return 0; -} - - -static const struct seq_operations rif_seq_ops = { - .start = rif_seq_start, - .next = rif_seq_next, - .stop = rif_seq_stop, - .show = rif_seq_show, -}; - -static int rif_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &rif_seq_ops); -} - -static const struct file_operations rif_seq_fops = { - .owner = THIS_MODULE, - .open = rif_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -#endif - -static const struct header_ops tr_header_ops = { - .create = tr_header, - .rebuild= tr_rebuild_header, -}; - -static void tr_setup(struct net_device *dev) -{ - /* - * Configure and register - */ - - dev->header_ops = &tr_header_ops; - - dev->type = ARPHRD_IEEE802_TR; - dev->hard_header_len = TR_HLEN; - dev->mtu = 2000; - dev->addr_len = TR_ALEN; - 
dev->tx_queue_len = 100; /* Long queues on tr */ - - memset(dev->broadcast,0xFF, TR_ALEN); - - /* New-style flags. */ - dev->flags = IFF_BROADCAST | IFF_MULTICAST ; -} - -/** - * alloc_trdev - Register token ring device - * @sizeof_priv: Size of additional driver-private structure to be allocated - * for this token ring device - * - * Fill in the fields of the device structure with token ring-generic values. - * - * Constructs a new net device, complete with a private data area of - * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for - * this private data area. - */ -struct net_device *alloc_trdev(int sizeof_priv) -{ - return alloc_netdev(sizeof_priv, "tr%d", tr_setup); -} - -#ifdef CONFIG_SYSCTL -static struct ctl_table tr_table[] = { - { - .procname = "rif_timeout", - .data = &sysctl_tr_rif_timeout, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { }, -}; - -static __initdata struct ctl_path tr_path[] = { - { .procname = "net", }, - { .procname = "token-ring", }, - { } -}; -#endif - -/* - * Called during bootup. We don't actually have to initialise - * too much for this. - */ - -static int __init rif_init(void) -{ - rif_timer.expires = jiffies + sysctl_tr_rif_timeout; - setup_timer(&rif_timer, rif_check_expire, 0); - add_timer(&rif_timer); -#ifdef CONFIG_SYSCTL - register_sysctl_paths(tr_path, tr_table); -#endif - proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); - return 0; -} - -module_init(rif_init); - -EXPORT_SYMBOL(tr_type_trans); -EXPORT_SYMBOL(alloc_trdev); - -MODULE_LICENSE("GPL"); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index efea35b02e7..9096bcb0813 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -266,19 +266,19 @@ static void vlan_sync_address(struct net_device *dev, struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); /* May be called without an actual change */ - if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) + if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) return; /* vlan address was different from the old address and is equal to * the new address */ - if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && - !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) + if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_del(dev, vlandev->dev_addr); /* vlan address was equal to the old address and is different from * the new address */ - if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && - compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) + if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); @@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, break; case NETDEV_DOWN: + if (dev->features & NETIF_F_HW_VLAN_FILTER) + vlan_vid_del(dev, 0); + /* Put all VLANs for this dev in the down state too. */ for (i = 0; i < VLAN_N_VID; i++) { vlandev = vlan_group_get_device(grp, i); diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4d39d802be2..8ca533c95de 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -31,8 +31,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) /* Our lower layer thinks this is not local, let's make sure. * This allows the VLAN to have a different MAC than the * underlying device, and still route correctly. 
*/ - if (!compare_ether_addr(eth_hdr(skb)->h_dest, - vlan_dev->dev_addr)) + if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) skb->pkt_type = PACKET_HOST; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9757c193c86..da1bc9c3cf3 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -277,7 +277,7 @@ static int vlan_dev_open(struct net_device *dev) !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; @@ -307,7 +307,7 @@ clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); del_unicast: - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); @@ -326,7 +326,7 @@ static int vlan_dev_stop(struct net_device *dev) if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); netif_carrier_off(dev); @@ -345,13 +345,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p) if (!(dev->flags & IFF_UP)) goto out; - if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { + if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) { err = dev_uc_add(real_dev, addr->sa_data); if (err < 0) return err; } - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 50711368ad6..708c80ea187 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) struct nlattr *nest; unsigned int i; - NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id); + if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id)) + goto nla_put_failure; if (vlan->flags) { f.flags = vlan->flags; f.mask = ~0; - NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); + if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f)) + goto nla_put_failure; } if (vlan->nr_ingress_mappings) { nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); @@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) m.from = i; m.to = vlan->ingress_priority_map[i]; - NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, - sizeof(m), &m); + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; } nla_nest_end(skb, nest); } @@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) m.from = pm->priority; m.to = (pm->vlan_qos >> 13) & 0x7; - NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, - sizeof(m), &m); + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; } } nla_nest_end(skb, nest); diff --git a/net/9p/client.c b/net/9p/client.c index b23a17c431c..a170893d70e 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1530,7 +1530,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; @@ -1605,7 +1605,7 @@ 
p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, struct p9_req_t *req; p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; @@ -2040,7 +2040,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) char *dataptr; p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 9ee48cb3017..3d33ecf1332 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -368,7 +368,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, const char *sptr = va_arg(ap, const char *); uint16_t len = 0; if (sptr) - len = min_t(uint16_t, strlen(sptr), + len = min_t(size_t, strlen(sptr), USHRT_MAX); errcode = p9pdu_writef(pdu, proto_version, diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index fccae26fa67..6449bae1570 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -513,7 +513,7 @@ error: clear_bit(Wworksched, &m->wsched); } -static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) { struct p9_poll_wait *pwait = container_of(wait, struct p9_poll_wait, wait); diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 052d343d43f..2a167658bb9 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -615,7 +615,8 @@ static void p9_virtio_remove(struct virtio_device *vdev) { struct virtio_chan *chan = vdev->priv; - BUG_ON(chan->inuse); + if (chan->inuse) + p9_virtio_close(chan->client); vdev->config->del_vqs(vdev); mutex_lock(&virtio_9p_lock); diff --git a/net/Kconfig b/net/Kconfig index e07272d0bb2..245831bec09 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -207,10 +207,10 @@ source "net/ipx/Kconfig" source "drivers/net/appletalk/Kconfig" source "net/x25/Kconfig" source "net/lapb/Kconfig" -source "net/econet/Kconfig" source "net/wanrouter/Kconfig" source "net/phonet/Kconfig" source "net/ieee802154/Kconfig" +source "net/mac802154/Kconfig" source "net/sched/Kconfig" source "net/dcb/Kconfig" source "net/dns_resolver/Kconfig" @@ -246,9 +246,6 @@ config BQL select DQL default y -config HAVE_BPF_JIT - bool - config BPF_JIT bool "enable BPF Just In Time compiler" depends on HAVE_BPF_JIT @@ -295,7 +292,7 @@ config NET_TCPPROBE module will be called tcp_probe. 
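[Editor's aside: the vlan_netlink.c hunks above follow the tree-wide removal of the NLA_PUT() family of macros, which hid a "goto nla_put_failure" inside themselves. A hedged kernel-style sketch of the replacement pattern, using the real nla_put_u16()/nla_put() helpers but a hypothetical fill function; it compiles only in-tree:

#include <net/netlink.h>
#include <linux/if_link.h>

/*
 * Old style -- the macro jumped on its own:
 *	NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_id);
 *
 * New style -- the caller checks the return value and jumps explicitly:
 */
static int example_fill_info(struct sk_buff *skb, u16 vlan_id,
			     const struct ifla_vlan_flags *f)
{
	if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_id))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(*f), f))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

Both helpers return non-zero when the skb lacks tailroom for the attribute, so the control flow is identical to the old macros but visible at the call site.]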
config NET_DROP_MONITOR - boolean "Network packet drop alerting service" + tristate "Network packet drop alerting service" depends on INET && EXPERIMENTAL && TRACEPOINTS ---help--- This feature provides an alerting service to userspace in the @@ -340,3 +337,7 @@ source "net/nfc/Kconfig" endif # if NET + +# Used by archs to tell that they support BPF_JIT +config HAVE_BPF_JIT + bool diff --git a/net/Makefile b/net/Makefile index ad432fa4d93..4f4ee083064 100644 --- a/net/Makefile +++ b/net/Makefile @@ -40,7 +40,6 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_L2TP) += l2tp/ obj-$(CONFIG_DECNET) += decnet/ -obj-$(CONFIG_ECONET) += econet/ obj-$(CONFIG_PHONET) += phonet/ ifneq ($(CONFIG_VLAN_8021Q),) obj-y += 8021q/ @@ -60,6 +59,7 @@ ifneq ($(CONFIG_DCB),) obj-y += dcb/ endif obj-$(CONFIG_IEEE802154) += ieee802154/ +obj-$(CONFIG_MAC802154) += mac802154/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index bfa9ab93eda..86852963b7f 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -63,7 +63,7 @@ #include <net/tcp_states.h> #include <net/route.h> #include <linux/atalk.h> -#include "../core/kmap_skb.h" +#include <linux/highmem.h> struct datalink_proto *ddp_dl, *aarp_dl; static const struct proto_ops atalk_dgram_ops; @@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); sum = atalk_sum_partial(vaddr + frag->page_offset + offset - start, copy, sum); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); if (!(len -= copy)) return sum; @@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, if (addr->sat_addr.s_node == ATADDR_BCAST && !sock_flag(sk, SOCK_BROADCAST)) { #if 1 - printk(KERN_WARNING "%s is broken and did not set " - "SO_BROADCAST. 
It will break when 2.2 is " - "released.\n", + pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n", current->comm); #else return -EACCES; diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c index 04e9c0da7aa..ebb864361f7 100644 --- a/net/appletalk/sysctl_net_atalk.c +++ b/net/appletalk/sysctl_net_atalk.c @@ -42,20 +42,14 @@ static struct ctl_table atalk_table[] = { { }, }; -static struct ctl_path atalk_path[] = { - { .procname = "net", }, - { .procname = "appletalk", }, - { } -}; - static struct ctl_table_header *atalk_table_header; void atalk_register_sysctl(void) { - atalk_table_header = register_sysctl_paths(atalk_path, atalk_table); + atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table); } void atalk_unregister_sysctl(void) { - unregister_sysctl_table(atalk_table_header); + unregister_net_sysctl_table(atalk_table_header); } diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 353fccf1cde..4819d31533e 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -73,7 +73,7 @@ struct br2684_vcc { #ifdef CONFIG_ATM_BR2684_IPFILTER struct br2684_filter filter; #endif /* CONFIG_ATM_BR2684_IPFILTER */ - unsigned copies_needed, copies_failed; + unsigned int copies_needed, copies_failed; }; struct br2684_dev { diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index 62dc8bfe6fe..bbd3b639992 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -97,9 +97,8 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, error = sock_get_timestampns(sk, argp); goto done; case ATM_SETSC: - if (net_ratelimit()) - pr_warning("ATM_SETSC is obsolete; used by %s:%d\n", - current->comm, task_pid_nr(current)); + net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n", + current->comm, task_pid_nr(current)); error = 0; goto done; case ATMSIGD_CTRL: @@ -123,8 +122,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, work for 32-bit userspace. TBH I don't really want to think about it at all. dwmw2. */ if (compat) { - if (net_ratelimit()) - pr_warning("32-bit task cannot be atmsigd\n"); + net_warn_ratelimited("32-bit task cannot be atmsigd\n"); error = -EINVAL; goto done; } diff --git a/net/atm/lec.c b/net/atm/lec.c index f1964caa0f8..a7d172105c9 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -26,11 +26,6 @@ #include <linux/spinlock.h> #include <linux/seq_file.h> -/* TokenRing if needed */ -#ifdef CONFIG_TR -#include <linux/trdevice.h> -#endif - /* And atm device */ #include <linux/atmdev.h> #include <linux/atmlec.h> @@ -163,50 +158,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ /* - * Modelled after tr_type_trans - * All multicast and ARE or STE frames go to BUS. - * Non source routed frames go by destination address. - * Last hop source routed frames go by destination address. - * Not last hop source routed frames go by _next_ route descriptor. - * Returns pointer to destination MAC address or fills in rdesc - * and returns NULL. 
- */ -#ifdef CONFIG_TR -static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) -{ - struct trh_hdr *trh; - unsigned int riflen, num_rdsc; - - trh = (struct trh_hdr *)packet; - if (trh->daddr[0] & (uint8_t) 0x80) - return bus_mac; /* multicast */ - - if (trh->saddr[0] & TR_RII) { - riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; - if ((ntohs(trh->rcf) >> 13) != 0) - return bus_mac; /* ARE or STE */ - } else - return trh->daddr; /* not source routed */ - - if (riflen < 6) - return trh->daddr; /* last hop, source routed */ - - /* riflen is 6 or more, packet has more than one route descriptor */ - num_rdsc = (riflen / 2) - 1; - memset(rdesc, 0, ETH_ALEN); - /* offset 4 comes from LAN destination field in LE control frames */ - if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT)) - memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16)); - else { - memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16)); - rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0)); - } - - return NULL; -} -#endif /* CONFIG_TR */ - -/* * Open/initialize the netdevice. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * @@ -257,9 +208,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct lec_arp_table *entry; unsigned char *dst; int min_frame_size; -#ifdef CONFIG_TR - unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ -#endif int is_rdesc; pr_debug("called\n"); @@ -290,24 +238,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, } skb_push(skb, 2); - /* Put le header to place, works for TokenRing too */ + /* Put le header to place */ lec_h = (struct lecdatahdr_8023 *)skb->data; lec_h->le_header = htons(priv->lecid); -#ifdef CONFIG_TR - /* - * Ugly. Use this to realign Token Ring packets for - * e.g. PCA-200E driver. 
- */ - if (priv->is_trdev) { - skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); - kfree_skb(skb); - if (skb2 == NULL) - return NETDEV_TX_OK; - skb = skb2; - } -#endif - #if DUMP_PACKETS >= 2 #define MAX_DUMP_SKB 99 #elif DUMP_PACKETS >= 1 @@ -321,12 +255,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, #endif /* DUMP_PACKETS >= 1 */ /* Minimum ethernet-frame size */ -#ifdef CONFIG_TR - if (priv->is_trdev) - min_frame_size = LEC_MINIMUM_8025_SIZE; - else -#endif - min_frame_size = LEC_MINIMUM_8023_SIZE; + min_frame_size = LEC_MINIMUM_8023_SIZE; if (skb->len < min_frame_size) { if ((skb->len + skb_tailroom(skb)) < min_frame_size) { skb2 = skb_copy_expand(skb, 0, @@ -345,15 +274,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, /* Send to right vcc */ is_rdesc = 0; dst = lec_h->h_dest; -#ifdef CONFIG_TR - if (priv->is_trdev) { - dst = get_tr_dst(skb->data + 2, rdesc); - if (dst == NULL) { - dst = rdesc; - is_rdesc = 1; - } - } -#endif entry = NULL; vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", @@ -710,12 +630,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) dev_kfree_skb(skb); return; } -#ifdef CONFIG_TR - if (priv->is_trdev) - dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest; - else -#endif - dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; + dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; /* * If this is a Data Direct VCC, and the VCC does not match @@ -723,16 +638,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) */ spin_lock_irqsave(&priv->lec_arp_lock, flags); if (lec_is_data_direct(vcc)) { -#ifdef CONFIG_TR - if (priv->is_trdev) - src = - ((struct lecdatahdr_8025 *)skb->data)-> - h_source; - else -#endif - src = - ((struct lecdatahdr_8023 *)skb->data)-> - h_source; + src = ((struct lecdatahdr_8023 *)skb->data)->h_source; entry = lec_arp_find(priv, src); if (entry && entry->vcc != vcc) { lec_arp_remove(priv, entry); @@ -750,12 +656,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) if (!hlist_empty(&priv->lec_arp_empty_ones)) lec_arp_check_empties(priv, vcc, skb); skb_pull(skb, 2); /* skip lec_id */ -#ifdef CONFIG_TR - if (priv->is_trdev) - skb->protocol = tr_type_trans(skb, dev); - else -#endif - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); @@ -827,27 +728,13 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) i = 0; else i = arg; -#ifdef CONFIG_TR if (arg >= MAX_LEC_ITF) return -EINVAL; -#else /* Reserve the top NUM_TR_DEVS for TR */ - if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS)) - return -EINVAL; -#endif if (!dev_lec[i]) { - int is_trdev, size; - - is_trdev = 0; - if (i >= (MAX_LEC_ITF - NUM_TR_DEVS)) - is_trdev = 1; + int size; size = sizeof(struct lec_priv); -#ifdef CONFIG_TR - if (is_trdev) - dev_lec[i] = alloc_trdev(size); - else -#endif - dev_lec[i] = alloc_etherdev(size); + dev_lec[i] = alloc_etherdev(size); if (!dev_lec[i]) return -ENOMEM; dev_lec[i]->netdev_ops = &lec_netdev_ops; @@ -858,7 +745,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) } priv = netdev_priv(dev_lec[i]); - priv->is_trdev = is_trdev; } else { priv = netdev_priv(dev_lec[i]); if (priv->lecd) @@ -1255,7 +1141,7 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, struct sk_buff *skb; struct lec_priv *priv = netdev_priv(dev); - if (compare_ether_addr(lan_dst, dev->dev_addr)) + if 
(!ether_addr_equal(lan_dst, dev->dev_addr)) return 0; /* not our mac address */ kfree(priv->tlvs); /* NULL if there was no previous association */ @@ -1662,7 +1548,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; hlist_for_each_entry(entry, node, head, next) { - if (!compare_ether_addr(mac_addr, entry->mac_addr)) + if (ether_addr_equal(mac_addr, entry->mac_addr)) return entry; } return NULL; @@ -1849,7 +1735,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, case 1: return priv->mcast_vcc; case 2: /* LANE2 wants arp for multicast addresses */ - if (!compare_ether_addr(mac_to_find, bus_mac)) + if (ether_addr_equal(mac_to_find, bus_mac)) return priv->mcast_vcc; break; default: @@ -2372,15 +2258,7 @@ lec_arp_check_empties(struct lec_priv *priv, struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; - unsigned char *src; -#ifdef CONFIG_TR - struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data; - - if (priv->is_trdev) - src = tr_hdr->h_source; - else -#endif - src = hdr->h_source; + unsigned char *src = hdr->h_source; spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_for_each_entry_safe(entry, node, next, diff --git a/net/atm/lec.h b/net/atm/lec.h index dfc07196646..a86aff9a3c0 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -55,11 +55,11 @@ struct lane2_ops { * frames. * * 1. Dix Ethernet EtherType frames encoded by placing EtherType - * field in h_type field. Data follows immediatelly after header. + * field in h_type field. Data follows immediately after header. * 2. LLC Data frames whose total length, including LLC field and data, * but not padding required to meet the minimum data frame length, * is less than 1536(0x0600) MUST be encoded by placing that length - * in the h_type field. The LLC field follows header immediatelly. + * in the h_type field. The LLC field follows header immediately. * 3. LLC data frames longer than this maximum MUST be encoded by placing * the value 0 in the h_type field. * @@ -142,7 +142,6 @@ struct lec_priv { int itfnum; /* e.g. 
2 for lec2, 5 for lec5 */ struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */ int is_proxy; /* bridge between ATM and Ethernet */ - int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */ }; struct lec_vcc_priv { diff --git a/net/atm/mpc.c b/net/atm/mpc.c index aa972409f09..d4cc1be5c36 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -592,8 +592,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb, goto non_ip; while (i < mpc->number_of_mps_macs) { - if (!compare_ether_addr(eth->h_dest, - (mpc->mps_macs + i*ETH_ALEN))) + if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN)) if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */ return NETDEV_TX_OK; i++; diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 53e50029227..5bdd300db0f 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -207,7 +207,7 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, size_t nbytes, loff_t *ppos) { char *page, *p; - unsigned len; + unsigned int len; if (nbytes == 0) return 0; diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 614d3fc47ed..ce1e59fdae7 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -62,12 +62,25 @@ struct pppoatm_vcc { void (*old_pop)(struct atm_vcc *, struct sk_buff *); /* keep old push/pop for detaching */ enum pppoatm_encaps encaps; + atomic_t inflight; + unsigned long blocked; int flags; /* SC_COMP_PROT - compress protocol */ struct ppp_channel chan; /* interface to generic ppp layer */ struct tasklet_struct wakeup_tasklet; }; /* + * We want to allow two packets in the queue. The one that's currently in + * flight, and *one* queued up ready for the ATM device to send immediately + * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so + * inflight == -2 represents an empty queue, -1 one packet, and zero means + * there are two packets in the queue. + */ +#define NONE_INFLIGHT -2 + +#define BLOCKED 0 + +/* * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol * ID (0xC021) used in autodetection */ @@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsigned long arg) static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb) { struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); + pvcc->old_pop(atmvcc, skb); + atomic_dec(&pvcc->inflight); + /* - * We don't really always want to do this since it's - * really inefficient - it would be much better if we could - * test if we had actually throttled the generic layer. - * Unfortunately then there would be a nasty SMP race where - * we could clear that flag just as we refuse another packet. - * For now we do the safe thing. + * We always used to run the wakeup tasklet unconditionally here, for + * fear of race conditions where we clear the BLOCKED flag just as we + * refuse another packet in pppoatm_send(). This was quite inefficient. + * + * In fact it's OK. The PPP core will only ever call pppoatm_send() + * while holding the channel->downl lock. And ppp_output_wakeup() as + * called by the tasklet will *also* grab that lock. So even if another + * CPU is in pppoatm_send() right now, the tasklet isn't going to race + * with it. The wakeup *will* happen after the other CPU is safely out + * of pppoatm_send() again. + * + * So if the CPU in pppoatm_send() has already set the BLOCKED bit and + * it about to return, that's fine. We trigger a wakeup which will + * happen later. 
And if the CPU in pppoatm_send() *hasn't* set the + * BLOCKED bit yet, that's fine too because of the double check in + * pppoatm_may_send() which is commented there. */ - tasklet_schedule(&pvcc->wakeup_tasklet); + if (test_and_clear_bit(BLOCKED, &pvcc->blocked)) + tasklet_schedule(&pvcc->wakeup_tasklet); } /* @@ -184,6 +211,51 @@ error: ppp_input_error(&pvcc->chan, 0); } +static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size) +{ + /* + * It's not clear that we need to bother with using atm_may_send() + * to check we don't exceed sk->sk_sndbuf. If userspace sets a + * value of sk_sndbuf which is lower than the MTU, we're going to + * block for ever. But the code always did that before we introduced + * the packet count limit, so... + */ + if (atm_may_send(pvcc->atmvcc, size) && + atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT)) + return 1; + + /* + * We use test_and_set_bit() rather than set_bit() here because + * we need to ensure there's a memory barrier after it. The bit + * *must* be set before we do the atomic_inc() on pvcc->inflight. + * There's no smp_mb__after_set_bit(), so it's this or abuse + * smp_mb__after_clear_bit(). + */ + test_and_set_bit(BLOCKED, &pvcc->blocked); + + /* + * We may have raced with pppoatm_pop(). If it ran for the + * last packet in the queue, *just* before we set the BLOCKED + * bit, then it might never run again and the channel could + * remain permanently blocked. Cope with that race by checking + * *again*. If it did run in that window, we'll have space on + * the queue now and can return success. It's harmless to leave + * the BLOCKED flag set, since it's only used as a trigger to + * run the wakeup tasklet. Another wakeup will never hurt. + * If pppoatm_pop() is running but hasn't got as far as making + * space on the queue yet, then it hasn't checked the BLOCKED + * flag yet either, so we're safe in that case too. It'll issue + * an "immediate" wakeup... where "immediate" actually involves + * taking the PPP channel's ->downl lock, which is held by the + * code path that calls pppoatm_send(), and is thus going to + * wait for us to finish. + */ + if (atm_may_send(pvcc->atmvcc, size) && + atomic_inc_not_zero(&pvcc->inflight)) + return 1; + + return 0; +} /* * Called by the ppp_generic.c to send a packet - returns true if packet * was accepted. 
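[Editor's aside: the inflight accounting introduced for pppoatm above leans on atomic_inc_not_zero(): with an empty queue represented as -2, the counter can be bumped at most twice before it reaches zero and further increments are refused. A standalone userspace sketch of that capacity check, using C11 atomics in place of the kernel's atomic_t; inc_not_zero() is a hypothetical stand-in:

#include <stdatomic.h>
#include <stdio.h>

#define NONE_INFLIGHT -2	/* empty; -1 = one packet, 0 = full (two) */

/* Mimics atomic_inc_not_zero(): increment unless the value is zero. */
static int inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return 0;	/* queue full: refuse the packet */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));

	return 1;
}

int main(void)
{
	atomic_int inflight = NONE_INFLIGHT;

	printf("1st packet accepted: %d\n", inc_not_zero(&inflight)); /* 1 */
	printf("2nd packet accepted: %d\n", inc_not_zero(&inflight)); /* 1 */
	printf("3rd packet accepted: %d\n", inc_not_zero(&inflight)); /* 0 */

	atomic_fetch_sub(&inflight, 1);	/* TX-done pop frees one slot */
	printf("after pop, accepted: %d\n", inc_not_zero(&inflight)); /* 1 */
	return 0;
}

The BLOCKED bit in the real code then records that a send was refused, so pppoatm_pop() only schedules the wakeup tasklet when someone is actually waiting.]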
If we return false, then it's our job to call @@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) struct sk_buff *n; n = skb_realloc_headroom(skb, LLC_LEN); if (n != NULL && - !atm_may_send(pvcc->atmvcc, n->truesize)) { + !pppoatm_may_send(pvcc, n->truesize)) { kfree_skb(n); goto nospace; } @@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) skb = n; if (skb == NULL) return DROP_PACKET; - } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) + } else if (!pppoatm_may_send(pvcc, skb->truesize)) goto nospace; memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN); break; case e_vc: - if (!atm_may_send(pvcc->atmvcc, skb->truesize)) + if (!pppoatm_may_send(pvcc, skb->truesize)) goto nospace; break; case e_autodetect: @@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) if (pvcc == NULL) return -ENOMEM; pvcc->atmvcc = atmvcc; + + /* Maximum is zero, so that we can use atomic_inc_not_zero() */ + atomic_set(&pvcc->inflight, NONE_INFLIGHT); pvcc->old_push = atmvcc->push; pvcc->old_pop = atmvcc->pop; pvcc->encaps = (enum pppoatm_encaps) be.encaps; diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 509c8ac02b6..86767ca908a 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -166,7 +166,7 @@ void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type, { struct sk_buff *skb; struct atmsvc_msg *msg; - static unsigned session = 0; + static unsigned int session = 0; pr_debug("%d (0x%p)\n", (int)type, vcc); while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL))) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 9d9a6a3edbd..779095ded68 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, case AX25_P_NETROM: if (ax25_protocol_is_registered(AX25_P_NETROM)) return -ESOCKTNOSUPPORT; + break; #endif #ifdef CONFIG_ROSE_MODULE case AX25_P_ROSE: @@ -1990,7 +1991,6 @@ static int __init ax25_init(void) sock_register(&ax25_family_ops); dev_add_pack(&ax25_packet_type); register_netdevice_notifier(&ax25_dev_notifier); - ax25_register_sysctl(); proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops); proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops); @@ -2013,7 +2013,6 @@ static void __exit ax25_exit(void) proc_net_remove(&init_net, "ax25_calls"); unregister_netdevice_notifier(&ax25_dev_notifier); - ax25_unregister_sysctl(); dev_remove_pack(&ax25_packet_type); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index d0de30e8959..3d106767b27 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -59,8 +59,6 @@ void ax25_dev_device_up(struct net_device *dev) return; } - ax25_unregister_sysctl(); - dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); @@ -90,7 +88,7 @@ void ax25_dev_device_up(struct net_device *dev) ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); - ax25_register_sysctl(); + ax25_register_dev_sysctl(ax25_dev); } void ax25_dev_device_down(struct net_device *dev) @@ -100,7 +98,7 @@ void ax25_dev_device_down(struct net_device *dev) if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; - ax25_unregister_sysctl(); + ax25_unregister_dev_sysctl(ax25_dev); spin_lock_bh(&ax25_dev_lock); @@ -120,7 +118,6 @@ void ax25_dev_device_down(struct net_device *dev) spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); - ax25_register_sysctl(); return; } @@ -130,7 +127,6 @@ void ax25_dev_device_down(struct 
net_device *dev) spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); - ax25_register_sysctl(); return; } @@ -138,8 +134,6 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - - ax25_register_sysctl(); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 846ae4e2b11..67de6b33f2c 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -48,7 +48,7 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { unsigned char *buff; @@ -219,7 +219,7 @@ put: int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { return -AX25_HEADER_LEN; } diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index ebe0ef3f1d8..d5744b75251 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -29,17 +29,6 @@ static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; #endif -static struct ctl_table_header *ax25_table_header; - -static ctl_table *ax25_table; -static int ax25_table_size; - -static struct ctl_path ax25_path[] = { - { .procname = "net", }, - { .procname = "ax25", }, - { } -}; - static const ctl_table ax25_param_table[] = { { .procname = "ip_default_mode", @@ -159,52 +148,37 @@ static const ctl_table ax25_param_table[] = { { } /* that's all, folks! */ }; -void ax25_register_sysctl(void) +int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { - ax25_dev *ax25_dev; - int n, k; - - spin_lock_bh(&ax25_dev_lock); - for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) - ax25_table_size += sizeof(ctl_table); - - if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { - spin_unlock_bh(&ax25_dev_lock); - return; - } - - for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { - struct ctl_table *child = kmemdup(ax25_param_table, - sizeof(ax25_param_table), - GFP_ATOMIC); - if (!child) { - while (n--) - kfree(ax25_table[n].child); - kfree(ax25_table); - spin_unlock_bh(&ax25_dev_lock); - return; - } - ax25_table[n].child = ax25_dev->systable = child; - ax25_table[n].procname = ax25_dev->dev->name; - ax25_table[n].mode = 0555; - - - for (k = 0; k < AX25_MAX_VALUES; k++) - child[k].data = &ax25_dev->values[k]; - - n++; + char path[sizeof("net/ax25/") + IFNAMSIZ]; + int k; + struct ctl_table *table; + + table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (k = 0; k < AX25_MAX_VALUES; k++) + table[k].data = &ax25_dev->values[k]; + + snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name); + ax25_dev->sysheader = register_net_sysctl(&init_net, path, table); + if (!ax25_dev->sysheader) { + kfree(table); + return -ENOMEM; } - spin_unlock_bh(&ax25_dev_lock); - - ax25_table_header = register_sysctl_paths(ax25_path, ax25_table); + return 0; } -void ax25_unregister_sysctl(void) +void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) { - ctl_table *p; - unregister_sysctl_table(ax25_table_header); - - for (p = ax25_table; p->procname; p++) - kfree(p->child); - kfree(ax25_table); + struct ctl_table_header *header = ax25_dev->sysheader; + struct ctl_table *table; + + if (header) { + 
ax25_dev->sysheader = NULL; + table = header->ctl_table_arg; + unregister_net_sysctl_table(header); + kfree(table); + } } diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 2b68d068eaf..53f5244e28f 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -7,19 +7,28 @@ config BATMAN_ADV depends on NET select CRC16 default n - ---help--- + help + B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is + a routing protocol for multi-hop ad-hoc mesh networks. The + networks may be wired or wireless. See + http://www.open-mesh.org/ for more information and user space + tools. - B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is - a routing protocol for multi-hop ad-hoc mesh networks. The - networks may be wired or wireless. See - http://www.open-mesh.org/ for more information and user space - tools. +config BATMAN_ADV_BLA + bool "Bridge Loop Avoidance" + depends on BATMAN_ADV && INET + default y + help + This option enables BLA (Bridge Loop Avoidance), a mechanism + to avoid Ethernet frames looping when mesh nodes are connected + to both the same LAN and the same mesh. If you will never use + more than one mesh node in the same LAN, you can safely remove + this feature and save some space. config BATMAN_ADV_DEBUG bool "B.A.T.M.A.N. debugging" - depends on BATMAN_ADV != n - ---help--- - + depends on BATMAN_ADV + help This is an option for use by developers; most people should say N here. This enables compilation of support for outputting debugging information to the kernel log. The diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index 4e392ebedb6..6d5c1940667 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o batman-adv-y += bat_iv_ogm.o batman-adv-y += bat_sysfs.o batman-adv-y += bitarray.o +batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c index c3b0548b175..3b588f86d77 100644 --- a/net/batman-adv/bat_debugfs.c +++ b/net/batman-adv/bat_debugfs.c @@ -32,6 +32,7 @@ #include "soft-interface.h" #include "vis.h" #include "icmp_socket.h" +#include "bridge_loop_avoidance.h" static struct dentry *bat_debugfs; @@ -82,8 +83,8 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) 
va_start(args, fmt); vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); - fdebug_log(bat_priv->debug_log, "[%10lu] %s", - (jiffies / HZ), tmp_log_buf); + fdebug_log(bat_priv->debug_log, "[%10u] %s", + jiffies_to_msecs(jiffies), tmp_log_buf); va_end(args); return 0; @@ -238,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file) return single_open(file, gw_client_seq_print_text, net_dev); } -static int softif_neigh_open(struct inode *inode, struct file *file) +static int transtable_global_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, softif_neigh_seq_print_text, net_dev); + return single_open(file, tt_global_seq_print_text, net_dev); } -static int transtable_global_open(struct inode *inode, struct file *file) +#ifdef CONFIG_BATMAN_ADV_BLA +static int bla_claim_table_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, tt_global_seq_print_text, net_dev); + return single_open(file, bla_claim_table_seq_print_text, net_dev); } +#endif static int transtable_local_open(struct inode *inode, struct file *file) { @@ -282,16 +285,20 @@ struct bat_debuginfo bat_debuginfo_##_name = { \ static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); -static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); +#ifdef CONFIG_BATMAN_ADV_BLA +static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open); +#endif static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_originators, &bat_debuginfo_gateways, - &bat_debuginfo_softif_neigh, &bat_debuginfo_transtable_global, +#ifdef CONFIG_BATMAN_ADV_BLA + &bat_debuginfo_bla_claim_table, +#endif &bat_debuginfo_transtable_local, &bat_debuginfo_vis_data, NULL, diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a6d5d63fb6a..dc53798ebb4 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -30,33 +30,69 @@ #include "send.h" #include "bat_algo.h" -static void bat_iv_ogm_init(struct hard_iface *hard_iface) +static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + struct orig_node *orig_node, + struct orig_node *orig_neigh, + uint32_t seqno) +{ + struct neigh_node *neigh_node; + + neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno); + if (!neigh_node) + goto out; + + INIT_LIST_HEAD(&neigh_node->bonding_list); + + neigh_node->orig_node = orig_neigh; + neigh_node->if_incoming = hard_iface; + + spin_lock_bh(&orig_node->neigh_list_lock); + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + spin_unlock_bh(&orig_node->neigh_list_lock); + +out: + return neigh_node; +} + +static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; + uint32_t random_seqno; + int res = -1; - hard_iface->packet_len = BATMAN_OGM_LEN; + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); + atomic_set(&hard_iface->seqno, random_seqno); + + hard_iface->packet_len = BATMAN_OGM_HLEN; hard_iface->packet_buff = 
kmalloc(hard_iface->packet_len, GFP_ATOMIC); + if (!hard_iface->packet_buff) + goto out; + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - batman_ogm_packet->header.packet_type = BAT_OGM; + batman_ogm_packet->header.packet_type = BAT_IV_OGM; batman_ogm_packet->header.version = COMPAT_VERSION; batman_ogm_packet->header.ttl = 2; batman_ogm_packet->flags = NO_FLAGS; batman_ogm_packet->tq = TQ_MAX_VALUE; batman_ogm_packet->tt_num_changes = 0; batman_ogm_packet->ttvn = 0; + + res = 0; + +out: + return res; } -static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface) +static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) { - struct batman_ogm_packet *batman_ogm_packet; - - batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; - batman_ogm_packet->header.ttl = TTL; + kfree(hard_iface->packet_buff); + hard_iface->packet_buff = NULL; } -static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface) +static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; @@ -67,6 +103,15 @@ static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface) hard_iface->net_dev->dev_addr, ETH_ALEN); } +static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; + batman_ogm_packet->header.ttl = TTL; +} + /* when do we schedule our own ogm to be sent */ static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv) { @@ -92,7 +137,7 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, int tt_num_changes) { - int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); + int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes); return (next_buff_pos <= packet_len) && (next_buff_pos <= MAX_AGGREGATION_BYTES); @@ -132,7 +177,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, "Sending own" : "Forwarding")); bat_dbg(DBG_BATMAN, bat_priv, - "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", + "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? "aggregated " : ""), batman_ogm_packet->orig, ntohl(batman_ogm_packet->seqno), @@ -142,7 +187,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, batman_ogm_packet->ttvn, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); - buff_pos += BATMAN_OGM_LEN + + buff_pos += BATMAN_OGM_HLEN + tt_len(batman_ogm_packet->tt_num_changes); packet_num++; batman_ogm_packet = (struct batman_ogm_packet *) @@ -191,7 +236,7 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet) /* FIXME: what about aggregated packets ? */ bat_dbg(DBG_BATMAN, bat_priv, - "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n", + "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n", (forw_packet->own ? 
"Sending own" : "Forwarding"), batman_ogm_packet->orig, ntohl(batman_ogm_packet->seqno), @@ -335,10 +380,9 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, if ((atomic_read(&bat_priv->aggregated_ogms)) && (packet_len < MAX_AGGREGATION_BYTES)) forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + - sizeof(struct ethhdr)); + ETH_HLEN); else - forw_packet_aggr->skb = dev_alloc_skb(packet_len + - sizeof(struct ethhdr)); + forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN); if (!forw_packet_aggr->skb) { if (!own_packet) @@ -346,7 +390,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, kfree(forw_packet_aggr); goto out; } - skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); + skb_reserve(forw_packet_aggr->skb, ETH_HLEN); INIT_HLIST_NODE(&forw_packet_aggr->list); @@ -461,11 +505,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, static void bat_iv_ogm_forward(struct orig_node *orig_node, const struct ethhdr *ethhdr, struct batman_ogm_packet *batman_ogm_packet, - int directlink, struct hard_iface *if_incoming) + bool is_single_hop_neigh, + bool is_from_best_next_hop, + struct hard_iface *if_incoming) { struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); - struct neigh_node *router; - uint8_t in_tq, in_ttl, tq_avg = 0; uint8_t tt_num_changes; if (batman_ogm_packet->header.ttl <= 1) { @@ -473,54 +517,43 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, return; } - router = orig_node_get_router(orig_node); + if (!is_from_best_next_hop) { + /* Mark the forwarded packet when it is not coming from our + * best next hop. We still need to forward the packet for our + * neighbor link quality detection to work in case the packet + * originated from a single hop neighbor. Otherwise we can + * simply drop the ogm. 
+ */ + if (is_single_hop_neigh) + batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP; + else + return; + } - in_tq = batman_ogm_packet->tq; - in_ttl = batman_ogm_packet->header.ttl; tt_num_changes = batman_ogm_packet->tt_num_changes; batman_ogm_packet->header.ttl--; memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); - /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast - * of our best tq value */ - if (router && router->tq_avg != 0) { - - /* rebroadcast ogm of best ranking neighbor as is */ - if (!compare_eth(router->addr, ethhdr->h_source)) { - batman_ogm_packet->tq = router->tq_avg; - - if (router->last_ttl) - batman_ogm_packet->header.ttl = - router->last_ttl - 1; - } - - tq_avg = router->tq_avg; - } - - if (router) - neigh_node_free_ref(router); - /* apply hop penalty */ batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); bat_dbg(DBG_BATMAN, bat_priv, - "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n", - in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1, - batman_ogm_packet->header.ttl); + "Forwarding packet: tq: %i, ttl: %i\n", + batman_ogm_packet->tq, batman_ogm_packet->header.ttl); batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); /* switch of primaries first hop flag when forwarding */ batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; - if (directlink) + if (is_single_hop_neigh) batman_ogm_packet->flags |= DIRECTLINK; else batman_ogm_packet->flags &= ~DIRECTLINK; bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, - BATMAN_OGM_LEN + tt_len(tt_num_changes), + BATMAN_OGM_HLEN + tt_len(tt_num_changes), if_incoming, 0, bat_iv_ogm_fwd_send_time()); } @@ -603,12 +636,12 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if (is_duplicate) continue; - spin_lock_bh(&tmp_neigh_node->tq_lock); + spin_lock_bh(&tmp_neigh_node->lq_update_lock); ring_buffer_set(tmp_neigh_node->tq_recv, &tmp_neigh_node->tq_index, 0); tmp_neigh_node->tq_avg = ring_buffer_avg(tmp_neigh_node->tq_recv); - spin_unlock_bh(&tmp_neigh_node->tq_lock); + spin_unlock_bh(&tmp_neigh_node->lq_update_lock); } if (!neigh_node) { @@ -618,8 +651,9 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if (!orig_tmp) goto unlock; - neigh_node = create_neighbor(orig_node, orig_tmp, - ethhdr->h_source, if_incoming); + neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, + orig_node, orig_tmp, + batman_ogm_packet->seqno); orig_node_free_ref(orig_tmp); if (!neigh_node) @@ -631,14 +665,14 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, rcu_read_unlock(); orig_node->flags = batman_ogm_packet->flags; - neigh_node->last_valid = jiffies; + neigh_node->last_seen = jiffies; - spin_lock_bh(&neigh_node->tq_lock); + spin_lock_bh(&neigh_node->lq_update_lock); ring_buffer_set(neigh_node->tq_recv, &neigh_node->tq_index, batman_ogm_packet->tq); neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); - spin_unlock_bh(&neigh_node->tq_lock); + spin_unlock_bh(&neigh_node->lq_update_lock); if (!is_duplicate) { orig_node->last_ttl = batman_ogm_packet->header.ttl; @@ -744,19 +778,20 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, rcu_read_unlock(); if (!neigh_node) - neigh_node = create_neighbor(orig_neigh_node, - orig_neigh_node, - orig_neigh_node->orig, - if_incoming); + neigh_node = bat_iv_ogm_neigh_new(if_incoming, + orig_neigh_node->orig, + orig_neigh_node, + orig_neigh_node, + batman_ogm_packet->seqno); if 
(!neigh_node) goto out; - /* if orig_node is direct neighbor update neigh_node last_valid */ + /* if orig_node is direct neighbor update neigh_node last_seen */ if (orig_node == orig_neigh_node) - neigh_node->last_valid = jiffies; + neigh_node->last_seen = jiffies; - orig_node->last_valid = jiffies; + orig_node->last_seen = jiffies; /* find packet count of corresponding one hop neighbor */ spin_lock_bh(&orig_node->ogm_cnt_lock); @@ -842,7 +877,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; /* signalize caller that the packet is to be dropped. */ - if (window_protected(bat_priv, seq_diff, + if (!hlist_empty(&orig_node->neigh_list) && + window_protected(bat_priv, seq_diff, &orig_node->batman_seqno_reset)) goto out; @@ -850,9 +886,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, hlist_for_each_entry_rcu(tmp_neigh_node, node, &orig_node->neigh_list, list) { - is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, - orig_node->last_real_seqno, - batman_ogm_packet->seqno); + is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_ogm_packet->seqno); if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && (tmp_neigh_node->if_incoming == if_incoming)) @@ -866,13 +902,14 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, seq_diff, set_mark); tmp_neigh_node->real_packet_count = - bit_packet_count(tmp_neigh_node->real_bits); + bitmap_weight(tmp_neigh_node->real_bits, + TQ_LOCAL_WINDOW_SIZE); } rcu_read_unlock(); if (need_update) { bat_dbg(DBG_BATMAN, bat_priv, - "updating last_seqno: old %d, new %d\n", + "updating last_seqno: old %u, new %u\n", orig_node->last_real_seqno, batman_ogm_packet->seqno); orig_node->last_real_seqno = batman_ogm_packet->seqno; } @@ -897,7 +934,9 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, struct neigh_node *orig_neigh_router = NULL; int has_directlink_flag; int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; - int is_broadcast = 0, is_bidirectional, is_single_hop_neigh; + int is_broadcast = 0, is_bidirectional; + bool is_single_hop_neigh = false; + bool is_from_best_next_hop = false; int is_duplicate; uint32_t if_incoming_seqno; @@ -913,7 +952,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, * packet in an aggregation. Here we expect that the padding * is always zero (or not 0x01) */ - if (batman_ogm_packet->header.packet_type != BAT_OGM) + if (batman_ogm_packet->header.packet_type != BAT_IV_OGM) return; /* could be changed by schedule_own_packet() */ @@ -921,11 +960,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); - is_single_hop_neigh = (compare_eth(ethhdr->h_source, - batman_ogm_packet->orig) ? 
1 : 0); + if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig)) + is_single_hop_neigh = true; bat_dbg(DBG_BATMAN, bat_priv, - "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", + "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, @@ -998,11 +1037,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); word = &(orig_neigh_node->bcast_own[offset]); - bit_mark(word, - if_incoming_seqno - + bat_set_bit(word, + if_incoming_seqno - batman_ogm_packet->seqno - 2); orig_neigh_node->bcast_own_sum[if_incoming->if_num] = - bit_packet_count(word); + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); } @@ -1019,6 +1058,13 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, return; } + if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", + ethhdr->h_source); + return; + } + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); if (!orig_node) return; @@ -1043,6 +1089,10 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, if (router) router_router = orig_node_get_router(router->orig_node); + if ((router && router->tq_avg != 0) && + (compare_eth(router->addr, ethhdr->h_source))) + is_from_best_next_hop = true; + /* avoid temporary routing loops */ if (router && router_router && (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && @@ -1093,7 +1143,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, /* mark direct link on incoming interface */ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - 1, if_incoming); + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); @@ -1116,7 +1167,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast originator packet\n"); bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - 0, if_incoming); + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); out_neigh: if ((orig_neigh_node) && (!is_single_hop_neigh)) @@ -1132,13 +1184,25 @@ out: orig_node_free_ref(orig_node); } -static void bat_iv_ogm_receive(struct hard_iface *if_incoming, - struct sk_buff *skb) +static int bat_iv_ogm_receive(struct sk_buff *skb, + struct hard_iface *if_incoming) { + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batman_ogm_packet *batman_ogm_packet; struct ethhdr *ethhdr; int buff_pos = 0, packet_len; unsigned char *tt_buff, *packet_buff; + bool ret; + + ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); + if (!ret) + return NET_RX_DROP; + + /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface + * that does not have B.A.T.M.A.N. IV enabled ? 
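This refactor turns the OGM receive path into a registered handler: bat_iv_init() (further down) installs bat_iv_ogm_receive() for BAT_IV_OGM packets through recv_handler_register(), and the handler additionally guards against interfaces whose active routing algorithm is not B.A.T.M.A.N. IV by comparing the bat_ogm_emit function pointer. A minimal userspace sketch of this table-driven dispatch; the type value and all names here are illustrative, not the kernel API:

#include <stdio.h>
#include <stdint.h>

typedef int (*recv_handler_t)(const uint8_t *pkt, int len);

static recv_handler_t handlers[256];    /* one slot per packet type byte */

static int iv_ogm_receive(const uint8_t *pkt, int len)
{
        /* the real handler would also verify that B.A.T.M.A.N. IV is the
         * active algorithm before touching the packet, as above */
        printf("OGM, %d bytes\n", len);
        return 0;
}

static int recv_handler_register(uint8_t type, recv_handler_t handler)
{
        if (handlers[type])
                return -1;              /* slot already taken */
        handlers[type] = handler;
        return 0;
}

int main(void)
{
        uint8_t pkt[] = { 0x01, 0x00 }; /* first byte = packet type */

        recv_handler_register(0x01, iv_ogm_receive);
        if (handlers[pkt[0]])
                handlers[pkt[0]](pkt, (int)sizeof(pkt));
        return 0;
}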
+ */ + if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit) + return NET_RX_DROP; packet_len = skb_headlen(skb); ethhdr = (struct ethhdr *)skb_mac_header(skb); @@ -1152,31 +1216,50 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming, batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); - tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; + tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN; bat_iv_ogm_process(ethhdr, batman_ogm_packet, tt_buff, if_incoming); - buff_pos += BATMAN_OGM_LEN + + buff_pos += BATMAN_OGM_HLEN + tt_len(batman_ogm_packet->tt_num_changes); batman_ogm_packet = (struct batman_ogm_packet *) (packet_buff + buff_pos); } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, batman_ogm_packet->tt_num_changes)); + + kfree_skb(skb); + return NET_RX_SUCCESS; } static struct bat_algo_ops batman_iv __read_mostly = { .name = "BATMAN IV", - .bat_ogm_init = bat_iv_ogm_init, - .bat_ogm_init_primary = bat_iv_ogm_init_primary, - .bat_ogm_update_mac = bat_iv_ogm_update_mac, + .bat_iface_enable = bat_iv_ogm_iface_enable, + .bat_iface_disable = bat_iv_ogm_iface_disable, + .bat_iface_update_mac = bat_iv_ogm_iface_update_mac, + .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, .bat_ogm_schedule = bat_iv_ogm_schedule, .bat_ogm_emit = bat_iv_ogm_emit, - .bat_ogm_receive = bat_iv_ogm_receive, }; int __init bat_iv_init(void) { - return bat_algo_register(&batman_iv); + int ret; + + /* batman originator packet */ + ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); + if (ret < 0) + goto out; + + ret = bat_algo_register(&batman_iv); + if (ret < 0) + goto handler_unregister; + + goto out; + +handler_unregister: + recv_handler_unregister(BAT_IV_OGM); +out: + return ret; } diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 68ff759fc30..5bc7b66d32d 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -63,7 +63,7 @@ struct bat_attribute bat_attr_##_name = { \ .store = _store, \ }; -#define BAT_ATTR_STORE_BOOL(_name, _post_func) \ +#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ char *buff, size_t count) \ { \ @@ -73,9 +73,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ &bat_priv->_name, net_dev); \ } -#define BAT_ATTR_SHOW_BOOL(_name) \ -ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ - char *buff) \ +#define BAT_ATTR_SIF_SHOW_BOOL(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ { \ struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ return sprintf(buff, "%s\n", \ @@ -83,16 +83,17 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ "disabled" : "enabled"); \ } \ -/* Use this, if you are going to turn a [name] in bat_priv on or off */ -#define BAT_ATTR_BOOL(_name, _mode, _post_func) \ - static BAT_ATTR_STORE_BOOL(_name, _post_func) \ - static BAT_ATTR_SHOW_BOOL(_name) \ +/* Use this, if you are going to turn a [name] in the soft-interface + * (bat_priv) on or off */ +#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \ + static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ + static BAT_ATTR_SIF_SHOW_BOOL(_name) \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) -#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ +#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ - 
char *buff, size_t count) \ + char *buff, size_t count) \ { \ struct net_device *net_dev = kobj_to_netdev(kobj); \ struct bat_priv *bat_priv = netdev_priv(net_dev); \ @@ -100,19 +101,62 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ attr, &bat_priv->_name, net_dev); \ } -#define BAT_ATTR_SHOW_UINT(_name) \ -ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ - char *buff) \ +#define BAT_ATTR_SIF_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ { \ struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \ } \ -/* Use this, if you are going to set [name] in bat_priv to unsigned integer - * values only */ -#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \ - static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ - static BAT_ATTR_SHOW_UINT(_name) \ +/* Use this, if you are going to set [name] in the soft-interface + * (bat_priv) to an unsigned integer value */ +#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_SIF_SHOW_UINT(_name) \ + static BAT_ATTR(_name, _mode, show_##_name, store_##_name) + + +#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ +ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff, size_t count) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ + ssize_t length; \ + \ + if (!hard_iface) \ + return 0; \ + \ + length = __store_uint_attr(buff, count, _min, _max, _post_func, \ + attr, &hard_iface->_name, net_dev); \ + \ + hardif_free_ref(hard_iface); \ + return length; \ +} + +#define BAT_ATTR_HIF_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ + ssize_t length; \ + \ + if (!hard_iface) \ + return 0; \ + \ + length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\ + \ + hardif_free_ref(hard_iface); \ + return length; \ +} + +/* Use this, if you are going to set [name] in hard_iface to an + * unsigned integer value*/ +#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_HIF_SHOW_UINT(_name) \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) @@ -149,7 +193,7 @@ static int store_bool_attr(char *buff, size_t count, atomic_read(attr) == 1 ? "enabled" : "disabled", enabled == 1 ? "enabled" : "disabled"); - atomic_set(attr, (unsigned)enabled); + atomic_set(attr, (unsigned int)enabled); return count; } @@ -268,7 +312,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr, "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ? 
"client" : "server"); - atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp); + atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp); return count; } @@ -354,7 +398,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr, curr_gw_mode_str, buff); gw_deselect(bat_priv); - atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp); + atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp); return count; } @@ -384,26 +428,32 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, return gw_bandwidth_set(net_dev, buff, count); } -BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); -BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); -BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); -BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); +#ifdef CONFIG_BATMAN_ADV_BLA +BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); +#endif +BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); +BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL); static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); -BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); -BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); -BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, - post_gw_deselect); +BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); +BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); +BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, + post_gw_deselect); static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, store_gw_bwidth); #ifdef CONFIG_BATMAN_ADV_DEBUG -BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); +BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); #endif static struct bat_attribute *mesh_attrs[] = { &bat_attr_aggregated_ogms, &bat_attr_bonding, +#ifdef CONFIG_BATMAN_ADV_BLA + &bat_attr_bridge_loop_avoidance, +#endif &bat_attr_fragmentation, &bat_attr_ap_isolation, &bat_attr_vis_mode, diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 6d0aa216b23..07ae6e1b8ac 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -24,100 +24,13 @@ #include <linux/bitops.h> -/* returns true if the corresponding bit in the given seq_bits indicates true - * and curr_seqno is within range of last_seqno */ -int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, - uint32_t curr_seqno) -{ - int32_t diff, word_offset, word_num; - - diff = last_seqno - curr_seqno; - if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) { - return 0; - } else { - /* which word */ - word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE; - /* which position in the selected word */ - word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE; - - if (test_bit(word_offset, &seq_bits[word_num])) - return 1; - else - return 0; - } -} - -/* turn corresponding bit on, so we can remember that we got the packet */ -void bit_mark(unsigned long *seq_bits, int32_t n) -{ - int32_t word_offset, word_num; - - /* if too old, just drop it */ - if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) - return; - - /* which word */ - word_num = n / WORD_BIT_SIZE; - /* which position in the selected word */ - 
word_offset = n % WORD_BIT_SIZE; - - set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */ -} - /* shift the packet array by n places. */ -static void bit_shift(unsigned long *seq_bits, int32_t n) +static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n) { - int32_t word_offset, word_num; - int32_t i; - if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) return; - word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */ - word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */ - - for (i = NUM_WORDS - 1; i > word_num; i--) { - /* going from old to new, so we don't overwrite the data we copy - * from. - * - * left is high, right is low: FEDC BA98 7654 3210 - * ^^ ^^ - * vvvv - * ^^^^ = from, vvvvv =to, we'd have word_num==1 and - * word_offset==WORD_BIT_SIZE/2 ????? in this example. - * (=24 bits) - * - * our desired output would be: 9876 5432 1000 0000 - * */ - - seq_bits[i] = - (seq_bits[i - word_num] << word_offset) + - /* take the lower port from the left half, shift it left - * to its final position */ - (seq_bits[i - word_num - 1] >> - (WORD_BIT_SIZE-word_offset)); - /* and the upper part of the right half and shift it left to - * its position */ - /* for our example that would be: word[0] = 9800 + 0076 = - * 9876 */ - } - /* now for our last word, i==word_num, we only have its "left" half. - * that's the 1000 word in our example.*/ - - seq_bits[i] = (seq_bits[i - word_num] << word_offset); - - /* pad the rest with 0, if there is anything */ - i--; - - for (; i >= 0; i--) - seq_bits[i] = 0; -} - -static void bit_reset_window(unsigned long *seq_bits) -{ - int i; - for (i = 0; i < NUM_WORDS; i++) - seq_bits[i] = 0; + bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE); } @@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { if (set_mark) - bit_mark(seq_bits, -seq_num_diff); + bat_set_bit(seq_bits, -seq_num_diff); return 0; } @@ -145,10 +58,10 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, * set the mark if required */ if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { - bit_shift(seq_bits, seq_num_diff); + bat_bitmap_shift_left(seq_bits, seq_num_diff); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -159,9 +72,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, bat_dbg(DBG_BATMAN, bat_priv, "We missed a lot of packets (%i) !\n", seq_num_diff - 1); - bit_reset_window(seq_bits); + bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -176,9 +89,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, bat_dbg(DBG_BATMAN, bat_priv, "Other host probably restarted!\n"); - bit_reset_window(seq_bits); + bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, /* never reached */ return 0; } - -/* count the hamming weight, how many good packets did we receive? just count - * the 1's. 
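The hand-rolled window helpers being removed here (bit_shift(), bit_mark(), bit_packet_count()) give way to the generic kernel bitmap API: bitmap_shift_left(), plain set_bit()/test_bit() wrappers and bitmap_weight(), as the bitarray.h hunk below shows. A userspace sketch of the sliding sequence-number window semantics, assuming a 64-bit window so a single integer stands in for the bitmap (the real size is TQ_LOCAL_WINDOW_SIZE):

#include <stdint.h>
#include <stdio.h>

#define WINDOW 64

static uint64_t window;         /* bit n set: seqno (last_seqno - n) seen */

static void mark(uint32_t last_seqno, uint32_t seqno)   /* bat_set_bit() */
{
        uint32_t diff = last_seqno - seqno;

        if (diff < WINDOW)
                window |= 1ULL << diff;
}

static void advance(uint32_t n)         /* bitmap_shift_left() */
{
        /* n new seqnos arrived: existing marks age into higher positions */
        window = (n >= WINDOW) ? 0 : window << n;
}

static int count(void)                  /* bitmap_weight() */
{
        return __builtin_popcountll(window);
}

int main(void)
{
        mark(100, 100);
        mark(100, 98);
        advance(2);
        mark(102, 102);
        printf("packets seen in window: %d\n", count());        /* 3 */
        return 0;
}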
- */ -int bit_packet_count(const unsigned long *seq_bits) -{ - int i, hamming = 0; - - for (i = 0; i < NUM_WORDS; i++) - hamming += hweight_long(seq_bits[i]); - - return hamming; -} diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index c6135728a68..1835c15cda4 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -22,23 +22,33 @@ #ifndef _NET_BATMAN_ADV_BITARRAY_H_ #define _NET_BATMAN_ADV_BITARRAY_H_ -#define WORD_BIT_SIZE (sizeof(unsigned long) * 8) - /* returns true if the corresponding bit in the given seq_bits indicates true * and curr_seqno is within range of last_seqno */ -int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, - uint32_t curr_seqno); +static inline int bat_test_bit(const unsigned long *seq_bits, + uint32_t last_seqno, uint32_t curr_seqno) +{ + int32_t diff; + + diff = last_seqno - curr_seqno; + if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) + return 0; + else + return test_bit(diff, seq_bits); +} /* turn corresponding bit on, so we can remember that we got the packet */ -void bit_mark(unsigned long *seq_bits, int32_t n); +static inline void bat_set_bit(unsigned long *seq_bits, int32_t n) +{ + /* if too old, just drop it */ + if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) + return; + set_bit(n, seq_bits); /* turn the position on */ +} /* receive and process one packet, returns 1 if received seq_num is considered * new, 0 if old */ int bit_get_packet(void *priv, unsigned long *seq_bits, int32_t seq_num_diff, int set_mark); -/* count the hamming weight, how many good packets did we receive? */ -int bit_packet_count(const unsigned long *seq_bits); - #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c new file mode 100644 index 00000000000..c5863f49913 --- /dev/null +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -0,0 +1,1587 @@ +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "hash.h" +#include "hard-interface.h" +#include "originator.h" +#include "bridge_loop_avoidance.h" +#include "translation-table.h" +#include "send.h" + +#include <linux/etherdevice.h> +#include <linux/crc16.h> +#include <linux/if_arp.h> +#include <net/arp.h> +#include <linux/if_vlan.h> + +static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; + +static void bla_periodic_work(struct work_struct *work); +static void bla_send_announce(struct bat_priv *bat_priv, + struct backbone_gw *backbone_gw); + +/* return the index of the claim */ +static inline uint32_t choose_claim(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < ETH_ALEN + sizeof(short); i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + +/* return the index of the backbone gateway */ +static inline uint32_t choose_backbone_gw(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < ETH_ALEN + sizeof(short); i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + + +/* compares address and vid of two backbone gws */ +static int compare_backbone_gw(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct backbone_gw, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); +} + +/* compares address and vid of two claims */ +static int compare_claim(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct claim, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); +} + +/* free a backbone gw */ +static void backbone_gw_free_ref(struct backbone_gw *backbone_gw) +{ + if (atomic_dec_and_test(&backbone_gw->refcount)) + kfree_rcu(backbone_gw, rcu); +} + +/* finally deinitialize the claim */ +static void claim_free_rcu(struct rcu_head *rcu) +{ + struct claim *claim; + + claim = container_of(rcu, struct claim, rcu); + + backbone_gw_free_ref(claim->backbone_gw); + kfree(claim); +} + +/* free a claim, call claim_free_rcu if it's the last reference */ +static void claim_free_ref(struct claim *claim) +{ + if (atomic_dec_and_test(&claim->refcount)) + call_rcu(&claim->rcu, claim_free_rcu); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @data: search data (may be local/static data) + * + * looks for a claim in the hash, and returns it if found + * or NULL otherwise.
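choose_claim() and choose_backbone_gw() above are the same function twice: Bob Jenkins' one-at-a-time hash over the first ETH_ALEN + sizeof(short) bytes of the entry, i.e. the MAC address plus VLAN ID, which is why both struct claim and struct backbone_gw have to keep those two fields at the start of the structure. A standalone version (key and bucket count are made-up example values):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t one_at_a_time(const void *data, size_t len, uint32_t buckets)
{
        const unsigned char *key = data;
        uint32_t hash = 0;
        size_t i;

        for (i = 0; i < len; i++) {
                hash += key[i];
                hash += (hash << 10);
                hash ^= (hash >> 6);
        }

        hash += (hash << 3);
        hash ^= (hash >> 11);
        hash += (hash << 15);

        return hash % buckets;
}

int main(void)
{
        struct { uint8_t addr[6]; short vid; } key =
                { { 0x02, 0x42, 0xac, 0x11, 0x00, 0x02 }, 5 };

        /* 128 buckets, matching the claim hash size used in bla_init() */
        printf("bucket: %u\n", one_at_a_time(&key, 6 + sizeof(short), 128));
        return 0;
}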
+ */ +static struct claim *claim_hash_find(struct bat_priv *bat_priv, + struct claim *data) +{ + struct hashtable_t *hash = bat_priv->claim_hash; + struct hlist_head *head; + struct hlist_node *node; + struct claim *claim; + struct claim *claim_tmp = NULL; + int index; + + if (!hash) + return NULL; + + index = choose_claim(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + if (!compare_claim(&claim->hash_entry, data)) + continue; + + if (!atomic_inc_not_zero(&claim->refcount)) + continue; + + claim_tmp = claim; + break; + } + rcu_read_unlock(); + + return claim_tmp; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @addr: the address of the originator + * @vid: the VLAN ID + * + * looks for a backbone gateway in the hash, and returns it if found + * or NULL otherwise. + */ +static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv, + uint8_t *addr, short vid) +{ + struct hashtable_t *hash = bat_priv->backbone_hash; + struct hlist_head *head; + struct hlist_node *node; + struct backbone_gw search_entry, *backbone_gw; + struct backbone_gw *backbone_gw_tmp = NULL; + int index; + + if (!hash) + return NULL; + + memcpy(search_entry.orig, addr, ETH_ALEN); + search_entry.vid = vid; + + index = choose_backbone_gw(&search_entry, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (!compare_backbone_gw(&backbone_gw->hash_entry, + &search_entry)) + continue; + + if (!atomic_inc_not_zero(&backbone_gw->refcount)) + continue; + + backbone_gw_tmp = backbone_gw; + break; + } + rcu_read_unlock(); + + return backbone_gw_tmp; +} + +/* delete all claims for a backbone */ +static void bla_del_backbone_claims(struct backbone_gw *backbone_gw) +{ + struct hashtable_t *hash; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct claim *claim; + int i; + spinlock_t *list_lock; /* protects write access to the hash lists */ + + hash = backbone_gw->bat_priv->claim_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(claim, node, node_tmp, + head, hash_entry) { + + if (claim->backbone_gw != backbone_gw) + continue; + + claim_free_ref(claim); + hlist_del_rcu(node); + } + spin_unlock_bh(list_lock); + } + + /* all claims gone, initialize CRC */ + backbone_gw->crc = BLA_CRC_INIT; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @mac: the mac address to be announced within the claim + * @vid: the VLAN ID + * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) + * + * sends a claim frame according to the provided info.
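bla_send_claim() below smuggles the claim information into an ordinary-looking ARP reply: the client and backbone MACs travel in the Ethernet and ARP hardware-address fields, and struct bla_claim_dst is written over the ARP target hardware address. Judging from how this patch uses it (three magic bytes set up in bla_init(), a one-byte type, a two-byte group id), its layout is presumably:

#include <stdint.h>
#include <stdio.h>

struct bla_claim_dst {
        uint8_t  magic[3];      /* 0xff, 0x43, 0x05 - set up in bla_init() */
        uint8_t  type;          /* claim type (CLAIM, UNCLAIM, ...) */
        uint16_t group;         /* htons(crc16()) of the primary MAC */
} __attribute__((packed));

_Static_assert(sizeof(struct bla_claim_dst) == 6,
               "must overlay exactly one MAC address");

int main(void)
{
        /* illustrative values only */
        struct bla_claim_dst dst = { { 0xff, 0x43, 0x05 }, 0x02, 0xefbe };
        const uint8_t *b = (const uint8_t *)&dst;

        /* prints the ff:43:05:XX:YY:YY pattern described below */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               b[0], b[1], b[2], b[3], b[4], b[5]);
        return 0;
}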
+ */ +static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, + short vid, int claimtype) +{ + struct sk_buff *skb; + struct ethhdr *ethhdr; + struct hard_iface *primary_if; + struct net_device *soft_iface; + uint8_t *hw_src; + struct bla_claim_dst local_claim_dest; + uint32_t zeroip = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + return; + + memcpy(&local_claim_dest, &bat_priv->claim_dest, + sizeof(local_claim_dest)); + local_claim_dest.type = claimtype; + + soft_iface = primary_if->soft_iface; + + skb = arp_create(ARPOP_REPLY, ETH_P_ARP, + /* IP DST: 0.0.0.0 */ + zeroip, + primary_if->soft_iface, + /* IP SRC: 0.0.0.0 */ + zeroip, + /* Ethernet DST: Broadcast */ + NULL, + /* Ethernet SRC/HW SRC: originator mac */ + primary_if->net_dev->dev_addr, + /* HW DST: FF:43:05:XX:YY:YY + * with XX = claim type + * and YY:YY = group id + */ + (uint8_t *)&local_claim_dest); + + if (!skb) + goto out; + + ethhdr = (struct ethhdr *)skb->data; + hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr); + + /* now we pretend that the client would have sent this ... */ + switch (claimtype) { + case CLAIM_TYPE_ADD: + /* normal claim frame + * set Ethernet SRC to the client's mac + */ + memcpy(ethhdr->h_source, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); + break; + case CLAIM_TYPE_DEL: + /* unclaim frame + * set HW SRC to the client's mac + */ + memcpy(hw_src, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid); + break; + case CLAIM_TYPE_ANNOUNCE: + /* announcement frame + * set HW SRC to the special mac containing the crc + */ + memcpy(hw_src, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", + ethhdr->h_source, vid); + break; + case CLAIM_TYPE_REQUEST: + /* request frame + * set HW SRC to the special mac containing the crc + */ + memcpy(hw_src, mac, ETH_ALEN); + memcpy(ethhdr->h_dest, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n", + ethhdr->h_source, ethhdr->h_dest, vid); + break; + + } + + if (vid != -1) + skb = vlan_insert_tag(skb, vid); + + skb_reset_mac_header(skb); + skb->protocol = eth_type_trans(skb, soft_iface); + bat_priv->stats.rx_packets++; + bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; + soft_iface->last_rx = jiffies; + + netif_rx(skb); +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: the mac address of the originator + * @vid: the VLAN ID + * + * searches for the backbone gw or creates a new one if it could not + * be found.
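bla_get_backbone_gw() below is a find-or-create with a reference-count convention worth noting: a newly created entry starts at refcount 2, one reference owned by the hash table and one handed back to the caller, while lookups take theirs with atomic_inc_not_zero() so an entry that is already dying cannot be revived. A simplified single-threaded sketch of that ownership scheme (no RCU, no locking, one imaginary hash slot):

#include <stdio.h>
#include <stdlib.h>

struct entry {
        int refcount;
};

static struct entry *hash_slot;         /* stands in for the real hash */

static struct entry *get_or_create(void)
{
        if (hash_slot) {
                hash_slot->refcount++;  /* found: one more ref, for us */
                return hash_slot;
        }

        hash_slot = calloc(1, sizeof(*hash_slot));
        if (!hash_slot)
                return NULL;

        hash_slot->refcount = 2;        /* one for the hash, one returned */
        return hash_slot;
}

static void put(struct entry *e)
{
        if (--e->refcount == 0)
                free(e);
}

int main(void)
{
        struct entry *e = get_or_create();

        put(e);                 /* caller done; the hash still owns a ref */
        put(hash_slot);         /* hash teardown drops the last one */
        hash_slot = NULL;
        return 0;
}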
+ */ +static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, + uint8_t *orig, short vid) +{ + struct backbone_gw *entry; + struct orig_node *orig_node; + int hash_added; + + entry = backbone_hash_find(bat_priv, orig, vid); + + if (entry) + return entry; + + bat_dbg(DBG_BLA, bat_priv, + "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", + orig, vid); + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return NULL; + + entry->vid = vid; + entry->lasttime = jiffies; + entry->crc = BLA_CRC_INIT; + entry->bat_priv = bat_priv; + atomic_set(&entry->request_sent, 0); + memcpy(entry->orig, orig, ETH_ALEN); + + /* one for the hash, one for returning */ + atomic_set(&entry->refcount, 2); + + hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw, + choose_backbone_gw, entry, &entry->hash_entry); + + if (unlikely(hash_added != 0)) { + /* hash failed, free the structure */ + kfree(entry); + return NULL; + } + + /* this is a gateway now, remove any tt entries */ + orig_node = orig_hash_find(bat_priv, orig); + if (orig_node) { + tt_global_del_orig(bat_priv, orig_node, + "became a backbone gateway"); + orig_node_free_ref(orig_node); + } + return entry; +} + +/* update or add our own backbone gw to make sure we announce + * where we receive other backbone gws + */ +static void bla_update_own_backbone_gw(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + short vid) +{ + struct backbone_gw *backbone_gw; + + backbone_gw = bla_get_backbone_gw(bat_priv, + primary_if->net_dev->dev_addr, vid); + if (unlikely(!backbone_gw)) + return; + + backbone_gw->lasttime = jiffies; + backbone_gw_free_ref(backbone_gw); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @vid: the vid where the request came on + * + * Repeat all of our own claims, and finally send an ANNOUNCE frame + * to allow the requester another check if the CRC is correct now. + */ +static void bla_answer_request(struct bat_priv *bat_priv, + struct hard_iface *primary_if, short vid) +{ + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + struct claim *claim; + struct backbone_gw *backbone_gw; + int i; + + bat_dbg(DBG_BLA, bat_priv, + "bla_answer_request(): received a claim request, send all of our own claims again\n"); + + backbone_gw = backbone_hash_find(bat_priv, + primary_if->net_dev->dev_addr, vid); + if (!backbone_gw) + return; + + hash = bat_priv->claim_hash; + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + /* only own claims are interesting */ + if (claim->backbone_gw != backbone_gw) + continue; + + bla_send_claim(bat_priv, claim->addr, claim->vid, + CLAIM_TYPE_ADD); + } + rcu_read_unlock(); + } + + /* finally, send an announcement frame */ + bla_send_announce(bat_priv, backbone_gw); + backbone_gw_free_ref(backbone_gw); +} + +/** + * @backbone_gw: the backbone gateway from whom we are out of sync + * + * When the crc is wrong, ask the backbone gateway for a full table update. + * After the request, it will repeat all of its own claims and finally + * send an announcement claim with which we can check again.
+ */ +static void bla_send_request(struct backbone_gw *backbone_gw) +{ + /* first, remove all old entries */ + bla_del_backbone_claims(backbone_gw); + + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "Sending REQUEST to %pM\n", + backbone_gw->orig); + + /* send request */ + bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig, + backbone_gw->vid, CLAIM_TYPE_REQUEST); + + /* no local broadcasts should be sent or received, for now. */ + if (!atomic_read(&backbone_gw->request_sent)) { + atomic_inc(&backbone_gw->bat_priv->bla_num_requests); + atomic_set(&backbone_gw->request_sent, 1); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @backbone_gw: our backbone gateway which should be announced + * + * This function sends an announcement. It is called from multiple + * places. + */ +static void bla_send_announce(struct bat_priv *bat_priv, + struct backbone_gw *backbone_gw) +{ + uint8_t mac[ETH_ALEN]; + uint16_t crc; + + memcpy(mac, announce_mac, 4); + crc = htons(backbone_gw->crc); + memcpy(&mac[4], (uint8_t *)&crc, 2); + + bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE); + +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @mac: the mac address of the claim + * @vid: the VLAN ID of the frame + * @backbone_gw: the backbone gateway which claims it + * + * Adds a claim in the claim hash. + */ +static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac, + const short vid, struct backbone_gw *backbone_gw) +{ + struct claim *claim; + struct claim search_claim; + int hash_added; + + memcpy(search_claim.addr, mac, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + + /* create a new claim entry if it does not exist yet. */ + if (!claim) { + claim = kzalloc(sizeof(*claim), GFP_ATOMIC); + if (!claim) + return; + + memcpy(claim->addr, mac, ETH_ALEN); + claim->vid = vid; + claim->lasttime = jiffies; + claim->backbone_gw = backbone_gw; + + atomic_set(&claim->refcount, 2); + bat_dbg(DBG_BLA, bat_priv, + "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", + mac, vid); + hash_added = hash_add(bat_priv->claim_hash, compare_claim, + choose_claim, claim, &claim->hash_entry); + + if (unlikely(hash_added != 0)) { + /* only local changes happened. */ + kfree(claim); + return; + } + } else { + claim->lasttime = jiffies; + if (claim->backbone_gw == backbone_gw) + /* no need to register a new backbone */ + goto claim_free_ref; + + bat_dbg(DBG_BLA, bat_priv, + "bla_add_claim(): changing ownership for %pM, vid %d\n", + mac, vid); + + claim->backbone_gw->crc ^= + crc16(0, claim->addr, ETH_ALEN); + backbone_gw_free_ref(claim->backbone_gw); + + } + /* set (new) backbone gw */ + atomic_inc(&backbone_gw->refcount); + claim->backbone_gw = backbone_gw; + + backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + backbone_gw->lasttime = jiffies; + +claim_free_ref: + claim_free_ref(claim); +} + +/* Delete a claim from the claim hash which has the + * given mac address and vid. 
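bla_add_claim() above and bla_del_claim() below keep backbone_gw->crc current by XOR-ing in crc16() of each claimed client's MAC. XOR turns the running value into an order-independent digest of the whole claim set, removing a client is literally the same operation as adding it, and that digest is what ANNOUNCE frames let the gateways compare. A self-contained demonstration; crc16() is re-implemented bit by bit in the reflected 0xa001 form of the kernel's lib/crc16, and BLA_CRC_INIT is assumed to be zero:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t crc16_byte(uint16_t crc, uint8_t data)
{
        int i;

        crc ^= data;
        for (i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xa001 : crc >> 1;
        return crc;
}

static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--)
                crc = crc16_byte(crc, *buf++);
        return crc;
}

int main(void)
{
        const uint8_t a[6] = { 0x02, 0, 0, 0, 0, 0x01 };
        const uint8_t b[6] = { 0x02, 0, 0, 0, 0, 0x02 };
        uint16_t crc = 0;       /* BLA_CRC_INIT assumed to be 0 */

        crc ^= crc16(0, a, 6);  /* claim a */
        crc ^= crc16(0, b, 6);  /* claim b */
        crc ^= crc16(0, a, 6);  /* unclaim a: the same xor cancels it */

        /* digest now covers only b, in any claim order */
        printf("%04x == %04x\n", crc, crc16(0, b, 6));
        return 0;
}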
+ */ +static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac, + const short vid) +{ + struct claim search_claim, *claim; + + memcpy(search_claim.addr, mac, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + if (!claim) + return; + + bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid); + + hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim); + claim_free_ref(claim); /* reference from the hash is gone */ + + claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + + /* don't need the reference from hash_find() anymore */ + claim_free_ref(claim); +} + +/* check for ANNOUNCE frame, return 1 if handled */ +static int handle_announce(struct bat_priv *bat_priv, + uint8_t *an_addr, uint8_t *backbone_addr, short vid) +{ + struct backbone_gw *backbone_gw; + uint16_t crc; + + if (memcmp(an_addr, announce_mac, 4) != 0) + return 0; + + backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); + + if (unlikely(!backbone_gw)) + return 1; + + + /* handle as ANNOUNCE frame */ + backbone_gw->lasttime = jiffies; + crc = ntohs(*((uint16_t *)(&an_addr[4]))); + + bat_dbg(DBG_BLA, bat_priv, + "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n", + vid, backbone_gw->orig, crc); + + if (backbone_gw->crc != crc) { + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n", + backbone_gw->orig, backbone_gw->vid, backbone_gw->crc, + crc); + + bla_send_request(backbone_gw); + } else { + /* if we have sent a request and the crc was OK, + * we can allow traffic again. + */ + if (atomic_read(&backbone_gw->request_sent)) { + atomic_dec(&backbone_gw->bat_priv->bla_num_requests); + atomic_set(&backbone_gw->request_sent, 0); + } + } + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* check for REQUEST frame, return 1 if handled */ +static int handle_request(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *backbone_addr, + struct ethhdr *ethhdr, short vid) +{ + /* check for REQUEST frame */ + if (!compare_eth(backbone_addr, ethhdr->h_dest)) + return 0; + + /* sanity check, this should not happen on a normal switch, + * we ignore it in this case. 
+ */ + if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) + return 1; + + bat_dbg(DBG_BLA, bat_priv, + "handle_request(): REQUEST vid %d (sent by %pM)...\n", + vid, ethhdr->h_source); + + bla_answer_request(bat_priv, primary_if, vid); + return 1; +} + +/* check for UNCLAIM frame, return 1 if handled */ +static int handle_unclaim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *backbone_addr, + uint8_t *claim_addr, short vid) +{ + struct backbone_gw *backbone_gw; + + /* unclaim in any case if it is our own */ + if (primary_if && compare_eth(backbone_addr, + primary_if->net_dev->dev_addr)) + bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL); + + backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid); + + if (!backbone_gw) + return 1; + + /* this must be an UNCLAIM frame */ + bat_dbg(DBG_BLA, bat_priv, + "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", + claim_addr, vid, backbone_gw->orig); + + bla_del_claim(bat_priv, claim_addr, vid); + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* check for CLAIM frame, return 1 if handled */ +static int handle_claim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, uint8_t *backbone_addr, + uint8_t *claim_addr, short vid) +{ + struct backbone_gw *backbone_gw; + + /* register the gateway if not yet available, and add the claim. */ + + backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); + + if (unlikely(!backbone_gw)) + return 1; + + /* this must be a CLAIM frame */ + bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); + if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) + bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD); + + /* TODO: we could call something like tt_local_del() here. */ + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @hw_src: the Hardware source in the ARP Header + * @hw_dst: the Hardware destination in the ARP Header + * @ethhdr: pointer to the Ethernet header of the claim frame + * + * checks if it is a claim packet and if it's on the same group. + * This function also applies the group ID of the sender + * if it is in the same mesh. + * + * returns: + * 2 - if it is a claim packet and on the same group + * 1 - if it is a claim packet from another group + * 0 - if it is not a claim packet + */ +static int check_claim_group(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *hw_src, uint8_t *hw_dst, + struct ethhdr *ethhdr) +{ + uint8_t *backbone_addr; + struct orig_node *orig_node; + struct bla_claim_dst *bla_dst, *bla_dst_own; + + bla_dst = (struct bla_claim_dst *)hw_dst; + bla_dst_own = &bat_priv->claim_dest; + + /* check if it is a claim packet in general */ + if (memcmp(bla_dst->magic, bla_dst_own->magic, + sizeof(bla_dst->magic)) != 0) + return 0; + + /* if announcement packet, use the source, + * otherwise assume it is in the hw_src + */ + switch (bla_dst->type) { + case CLAIM_TYPE_ADD: + backbone_addr = hw_src; + break; + case CLAIM_TYPE_REQUEST: + case CLAIM_TYPE_ANNOUNCE: + case CLAIM_TYPE_DEL: + backbone_addr = ethhdr->h_source; + break; + default: + return 0; + } + + /* don't accept claim frames from ourselves */ + if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) + return 0; + + /* if it's already the same group, it is fine.
*/ + if (bla_dst->group == bla_dst_own->group) + return 2; + + /* let's see if this originator is in our mesh */ + orig_node = orig_hash_find(bat_priv, backbone_addr); + + /* don't accept claims from gateways which are not in + * the same mesh or group. + */ + if (!orig_node) + return 1; + + /* if our mesh friend's mac is bigger, use it for ourselves. */ + if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) { + bat_dbg(DBG_BLA, bat_priv, + "taking the other backbone's claim group: %04x\n", + ntohs(bla_dst->group)); + bla_dst_own->group = bla_dst->group; + } + + orig_node_free_ref(orig_node); + + return 2; +} + + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * + * Check if this is a claim frame, and process it accordingly. + * + * returns 1 if it was a claim frame, otherwise return 0 to + * tell the caller that it can use the frame on its own. + */ +static int bla_process_claim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct sk_buff *skb) +{ + struct ethhdr *ethhdr; + struct vlan_ethhdr *vhdr; + struct arphdr *arphdr; + uint8_t *hw_src, *hw_dst; + struct bla_claim_dst *bla_dst; + uint16_t proto; + int headlen; + short vid = -1; + int ret; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { + vhdr = (struct vlan_ethhdr *)ethhdr; + vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + proto = ntohs(vhdr->h_vlan_encapsulated_proto); + headlen = sizeof(*vhdr); + } else { + proto = ntohs(ethhdr->h_proto); + headlen = ETH_HLEN; + } + + if (proto != ETH_P_ARP) + return 0; /* not a claim frame */ + + /* this must be an ARP frame. check if it is a claim. */ + + if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev)))) + return 0; + + /* pskb_may_pull() may have modified the pointers, get ethhdr again */ + ethhdr = (struct ethhdr *)skb_mac_header(skb); + arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen); + + /* Check whether the ARP frame carries valid + * IP information + */ + + if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) + return 0; + if (arphdr->ar_pro != htons(ETH_P_IP)) + return 0; + if (arphdr->ar_hln != ETH_ALEN) + return 0; + if (arphdr->ar_pln != 4) + return 0; + + hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); + hw_dst = hw_src + ETH_ALEN + 4; + bla_dst = (struct bla_claim_dst *)hw_dst; + + /* check if it is a claim frame. */ + ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr); + if (ret == 1) + bat_dbg(DBG_BLA, bat_priv, + "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", + ethhdr->h_source, vid, hw_src, hw_dst); + + if (ret < 2) + return ret; + + /* become a backbone gw ourselves on this vlan if it has not happened yet */ + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + + /* check for the different types of claim frames ... */ + switch (bla_dst->type) { + case CLAIM_TYPE_ADD: + if (handle_claim(bat_priv, primary_if, hw_src, + ethhdr->h_source, vid)) + return 1; + break; + case CLAIM_TYPE_DEL: + if (handle_unclaim(bat_priv, primary_if, + ethhdr->h_source, hw_src, vid)) + return 1; + break; + + case CLAIM_TYPE_ANNOUNCE: + if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid)) + return 1; + break; + case CLAIM_TYPE_REQUEST: + if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid)) + return 1; + break; + } + + bat_dbg(DBG_BLA, bat_priv, + "bla_process_claim(): ERROR - this looks like a claim frame, but is useless.
eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", + ethhdr->h_source, vid, hw_src, hw_dst); + return 1; +} + +/* Check when we last heard from other nodes, and remove them in case of + * a time out, or clean all backbone gws if now is set. + */ +static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now) +{ + struct backbone_gw *backbone_gw; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct hashtable_t *hash; + spinlock_t *list_lock; /* protects write access to the hash lists */ + int i; + + hash = bat_priv->backbone_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(backbone_gw, node, node_tmp, + head, hash_entry) { + if (now) + goto purge_now; + if (!has_timed_out(backbone_gw->lasttime, + BLA_BACKBONE_TIMEOUT)) + continue; + + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "bla_purge_backbone_gw(): backbone gw %pM timed out\n", + backbone_gw->orig); + +purge_now: + /* don't wait for the pending request anymore */ + if (atomic_read(&backbone_gw->request_sent)) + atomic_dec(&bat_priv->bla_num_requests); + + bla_del_backbone_claims(backbone_gw); + + hlist_del_rcu(node); + backbone_gw_free_ref(backbone_gw); + } + spin_unlock_bh(list_lock); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @primary_if: the selected primary interface, may be NULL if now is set + * @now: whether the whole hash shall be wiped now + * + * Check when we last heard from our own claims, and remove them in case of + * a time out, or clean all claims if now is set + */ +static void bla_purge_claims(struct bat_priv *bat_priv, + struct hard_iface *primary_if, int now) +{ + struct claim *claim; + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + int i; + + hash = bat_priv->claim_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + if (now) + goto purge_now; + if (!compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) + continue; + if (!has_timed_out(claim->lasttime, + BLA_CLAIM_TIMEOUT)) + continue; + + bat_dbg(DBG_BLA, bat_priv, + "bla_purge_claims(): %pM, vid %d, time out\n", + claim->addr, claim->vid); + +purge_now: + handle_unclaim(bat_priv, primary_if, + claim->backbone_gw->orig, + claim->addr, claim->vid); + } + rcu_read_unlock(); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @primary_if: the new selected primary_if + * @oldif: the old primary interface, may be NULL + * + * Update the backbone gateways when our own orig address changes. + * + */ +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) +{ + struct backbone_gw *backbone_gw; + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + int i; + + /* reset bridge loop avoidance group id */ + bat_priv->claim_dest.group = + htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); + + if (!oldif) { + bla_purge_claims(bat_priv, NULL, 1); + bla_purge_backbone_gw(bat_priv, 1); + return; + } + + hash = bat_priv->backbone_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + /* own orig still holds the old value.
+
+
+/* (re)start the timer */
+static void bla_start_timer(struct bat_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
+			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
+}
+
+/* periodic work to do:
+ *  * purge structures when they are too old
+ *  * send announcements
+ */
+static void bla_periodic_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, bla_work);
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct backbone_gw *backbone_gw;
+	struct hashtable_t *hash;
+	struct hard_iface *primary_if;
+	int i;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	bla_purge_claims(bat_priv, primary_if, 0);
+	bla_purge_backbone_gw(bat_priv, 0);
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto out;
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		goto out;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			if (!compare_eth(backbone_gw->orig,
+					 primary_if->net_dev->dev_addr))
+				continue;
+
+			backbone_gw->lasttime = jiffies;
+
+			bla_send_announce(bat_priv, backbone_gw);
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+
+	bla_start_timer(bat_priv);
+}
+
+/* initialize all bla structures */
+int bla_init(struct bat_priv *bat_priv)
+{
+	int i;
+	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+	struct hard_iface *primary_if;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+
+	/* setting claim destination address */
+	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
+	bat_priv->claim_dest.type = 0;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (primary_if) {
+		bat_priv->claim_dest.group =
+			htons(crc16(0, primary_if->net_dev->dev_addr,
+				    ETH_ALEN));
+		hardif_free_ref(primary_if);
+	} else {
+		bat_priv->claim_dest.group = 0; /* will be set later */
+	}
+
+	/* initialize the duplicate list */
+	for (i = 0; i < DUPLIST_SIZE; i++)
+		bat_priv->bcast_duplist[i].entrytime =
+			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+	bat_priv->bcast_duplist_curr = 0;
+
+	if (bat_priv->claim_hash)
+		return 1;
+
+	bat_priv->claim_hash = hash_new(128);
+	bat_priv->backbone_hash = hash_new(32);
+
+	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+		return -1;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+	bla_start_timer(bat_priv);
+	return 1;
+}
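bla_start_timer() and bla_periodic_work() above form the usual self-rearming delayed-work pattern: a one-shot work item re-queues itself at the end of every run, turning it into a periodic job that stops cleanly once cancel_delayed_work_sync() is called (as bla_free() does further down). A condensed, self-contained sketch of the pattern, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct delayed_work my_work;

    static void my_periodic(struct work_struct *work)
    {
    	/* ... periodic housekeeping: purge, announce, etc. ... */

    	/* re-arm for the next period */
    	queue_delayed_work(system_wq, &my_work, msecs_to_jiffies(10000));
    }

    static void my_start(void)
    {
    	INIT_DELAYED_WORK(&my_work, my_periodic);
    	queue_delayed_work(system_wq, &my_work, msecs_to_jiffies(10000));
    }

Note that the patch re-runs INIT_DELAYED_WORK() on every rearm; initializing once and only re-queueing, as sketched here, is the more common form.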
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bcast_packet: the broadcast packet to check
+ * @hdr_size: maximum length of the frame
+ *
+ * check if it is on our broadcast list. Another gateway might
+ * have sent the same packet because it is connected to the same backbone,
+ * so we have to remove this duplicate.
+ *
+ * This is performed by checking the CRC, which will tell us
+ * with a good chance that it is the same packet. If it is furthermore
+ * sent by another host, drop it. We allow equal packets from
+ * the same host however as this might be intended.
+ */
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+			    struct bcast_packet *bcast_packet,
+			    int hdr_size)
+{
+	int i, length, curr;
+	uint8_t *content;
+	uint16_t crc;
+	struct bcast_duplist_entry *entry;
+
+	length = hdr_size - sizeof(*bcast_packet);
+	content = (uint8_t *)bcast_packet;
+	content += sizeof(*bcast_packet);
+
+	/* calculate the crc ... */
+	crc = crc16(0, content, length);
+
+	for (i = 0; i < DUPLIST_SIZE; i++) {
+		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+		entry = &bat_priv->bcast_duplist[curr];
+
+		/* we can stop searching if the entry is too old;
+		 * later entries will be even older
+		 */
+		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+			break;
+
+		if (entry->crc != crc)
+			continue;
+
+		if (compare_eth(entry->orig, bcast_packet->orig))
+			continue;
+
+		/* this entry seems to match: same crc, not too old,
+		 * and from another gw. therefore return 1 to forbid it.
+		 */
+		return 1;
+	}
+	/* not found, add a new entry (overwrite the oldest entry) */
+	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+	entry = &bat_priv->bcast_duplist[curr];
+	entry->crc = crc;
+	entry->entrytime = jiffies;
+	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+	bat_priv->bcast_duplist_curr = curr;
+
+	/* allow it, it's the first occurrence. */
+	return 0;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ *
+ * Check if the originator is a backbone gateway for any VLAN ID.
+ *
+ * returns 1 if it is found, 0 otherwise
+ */
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+{
+	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct backbone_gw *backbone_gw;
+	int i;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		return 0;
+
+	if (!hash)
+		return 0;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			if (compare_eth(backbone_gw->orig, orig)) {
+				rcu_read_unlock();
+				return 1;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	return 0;
+}
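The duplicate list in bla_check_bcast_duplist() above is a small ring buffer ordered newest-first: insertion moves the head one slot backwards (mod DUPLIST_SIZE), so the forward scan from the head visits the most recent entries first and may safely stop at the first timed-out slot. A tiny standalone demonstration of the index arithmetic used there:

    #include <stdio.h>

    #define DUPLIST_SIZE 16

    int main(void)
    {
    	int head = 0, i;

    	/* three insertions: the head walks backwards through the ring */
    	for (i = 0; i < 3; i++)
    		head = (head + DUPLIST_SIZE - 1) % DUPLIST_SIZE; /* 15, 14, 13 */

    	/* a forward scan from the head visits newest to oldest */
    	for (i = 0; i < 3; i++)
    		printf("scan step %d -> slot %d\n", i,
    		       (head + i) % DUPLIST_SIZE); /* slots 13, 14, 15 */
    	return 0;
    }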
+
+/**
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: length of the batman header preceding the ethernet frame
+ *
+ * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
+ * if the orig_node is also a gateway on the soft interface, otherwise it
+ * returns 0.
+ */
+int bla_is_backbone_gw(struct sk_buff *skb,
+		       struct orig_node *orig_node, int hdr_size)
+{
+	struct ethhdr *ethhdr;
+	struct vlan_ethhdr *vhdr;
+	struct backbone_gw *backbone_gw;
+	short vid = -1;
+
+	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+		return 0;
+
+	/* first, find out the vid. */
+	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
+		return 0;
+
+	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
+
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+			return 0;
+
+		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
+					      hdr_size);
+		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+	}
+
+	/* see if this originator is a backbone gw for this VLAN */
+	backbone_gw = backbone_hash_find(orig_node->bat_priv,
+					 orig_node->orig, vid);
+	if (!backbone_gw)
+		return 0;
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* free all bla structures (for soft-interface free or module unload) */
+void bla_free(struct bat_priv *bat_priv)
+{
+	struct hard_iface *primary_if;
+
+	cancel_delayed_work_sync(&bat_priv->bla_work);
+	primary_if = primary_if_get_selected(bat_priv);
+
+	if (bat_priv->claim_hash) {
+		bla_purge_claims(bat_priv, primary_if, 1);
+		hash_destroy(bat_priv->claim_hash);
+		bat_priv->claim_hash = NULL;
+	}
+	if (bat_priv->backbone_hash) {
+		bla_purge_backbone_gw(bat_priv, 1);
+		hash_destroy(bat_priv->backbone_hash);
+		bat_priv->backbone_hash = NULL;
+	}
+	if (primary_if)
+		hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ * @is_bcast: whether the frame arrived inside a broadcast packet
+ *
+ * bla_rx checks whether:
+ *  * we have to race for a claim,
+ *  * the frame is allowed on the LAN.
+ *
+ * In these cases, the skb is further handled by this function and
+ * returns 1, otherwise it returns 0 and the caller shall further
+ * process the skb.
+ */
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+	   bool is_bcast)
+{
+	struct ethhdr *ethhdr;
+	struct claim search_claim, *claim = NULL;
+	struct hard_iface *primary_if;
+	int ret;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto handled;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto allow;
+
+	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+		/* don't allow broadcasts while requests are in flight */
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
+			goto handled;
+
+	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	if (!claim) {
+		/* possible optimization: race for a claim;
+		 * no claim exists yet, claim it for us!
+		 */
+		handle_claim(bat_priv, primary_if,
+			     primary_if->net_dev->dev_addr,
+			     ethhdr->h_source, vid);
+		goto allow;
+	}
+
+	/* if it is our own claim ... */
+	if (compare_eth(claim->backbone_gw->orig,
+			primary_if->net_dev->dev_addr)) {
+		/* ... allow it in any case */
+		claim->lasttime = jiffies;
+		goto allow;
+	}
+
+	/* if it is a broadcast ... */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
+		goto handled;
+	} else {
+		/* seems the client considers us as its best gateway.
+		 * send a claim and update the claim table
+		 * immediately.
+ */ + handle_claim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } +allow: + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + ret = 0; + goto out; + +handled: + kfree_skb(skb); + ret = 1; + +out: + if (primary_if) + hardif_free_ref(primary_if); + if (claim) + claim_free_ref(claim); + return ret; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * @vid: the VLAN ID of the frame + * + * bla_tx checks if: + * * a claim was received which has to be processed + * * the frame is allowed on the mesh + * + * in these cases, the skb is further handled by this function and + * returns 1, otherwise it returns 0 and the caller shall further + * process the skb. + * + */ +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +{ + struct ethhdr *ethhdr; + struct claim search_claim, *claim = NULL; + struct hard_iface *primary_if; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto allow; + + /* in VLAN case, the mac header might not be set. */ + skb_reset_mac_header(skb); + + if (bla_process_claim(bat_priv, primary_if, skb)) + goto handled; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(atomic_read(&bat_priv->bla_num_requests))) + /* don't allow broadcasts while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto handled; + + memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); + search_claim.vid = vid; + + claim = claim_hash_find(bat_priv, &search_claim); + + /* if no claim exists, allow it. */ + if (!claim) + goto allow; + + /* check if we are responsible. */ + if (compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) { + /* if yes, the client has roamed and we have + * to unclaim it. + */ + handle_unclaim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } + + /* check if it is a multicast/broadcast frame */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + /* drop it. the responsible gateway has forwarded it into + * the backbone network. + */ + goto handled; + } else { + /* we must allow it. at least if we are + * responsible for the DESTINATION. 
+ */ + goto allow; + } +allow: + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + ret = 0; + goto out; +handled: + ret = 1; +out: + if (primary_if) + hardif_free_ref(primary_if); + if (claim) + claim_free_ref(claim); + return ret; +} + +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->claim_hash; + struct claim *claim; + struct hard_iface *primary_if; + struct hlist_node *node; + struct hlist_head *head; + uint32_t i; + bool is_own; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, + "Claims announced for the mesh %s (orig %pM, group id %04x)\n", + net_dev->name, primary_if->net_dev->dev_addr, + ntohs(bat_priv->claim_dest.group)); + seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", + "Client", "VID", "Originator", "CRC"); + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + is_own = compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr); + seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", + claim->addr, claim->vid, + claim->backbone_gw->orig, + (is_own ? 'x' : ' '), + claim->backbone_gw->crc); + } + rcu_read_unlock(); + } +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h new file mode 100644 index 00000000000..dc5227b398d --- /dev/null +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_BLA_H_ +#define _NET_BATMAN_ADV_BLA_H_ + +#ifdef CONFIG_BATMAN_ADV_BLA +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, + bool is_bcast); +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size); +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, int hdr_size); +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif); +int bla_init(struct bat_priv *bat_priv); +void bla_free(struct bat_priv *bat_priv); + +#define BLA_CRC_INIT 0 +#else /* ifdef CONFIG_BATMAN_ADV_BLA */ + +static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid, bool is_bcast) +{ + return 0; +} + +static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) +{ + return 0; +} + +static inline int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, + int hdr_size) +{ + return 0; +} + +static inline int bla_claim_table_seq_print_text(struct seq_file *seq, + void *offset) +{ + return 0; +} + +static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, + uint8_t *orig) +{ + return 0; +} + +static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) +{ + return 0; +} + +static inline void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) +{ +} + +static inline int bla_init(struct bat_priv *bat_priv) +{ + return 1; +} + +static inline void bla_free(struct bat_priv *bat_priv) +{ +} + +#endif /* ifdef CONFIG_BATMAN_ADV_BLA */ + +#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */ diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 6f9b9b78f77..47f7186dcef 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -558,10 +558,10 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) p++; /* ...and then we jump over the data */ - if (pkt_len < *p) + if (pkt_len < 1 + (*p)) goto out; - pkt_len -= *p; - p += (*p); + pkt_len -= 1 + (*p); + p += 1 + (*p); } } out: diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 377897701a8..dc334fa8984 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -28,15 +28,10 @@ #include "bat_sysfs.h" #include "originator.h" #include "hash.h" +#include "bridge_loop_avoidance.h" #include <linux/if_arp.h> - -static int batman_skb_recv(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev); - void hardif_free_rcu(struct rcu_head *rcu) { struct hard_iface *hard_iface; @@ -107,7 +102,8 @@ out: return hard_iface; } -static void primary_if_update_addr(struct bat_priv *bat_priv) +static void primary_if_update_addr(struct bat_priv *bat_priv, + struct hard_iface *oldif) { struct vis_packet *vis_packet; struct hard_iface *primary_if; @@ -122,6 +118,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv) 
memcpy(vis_packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); + bla_update_orig_address(bat_priv, primary_if, oldif); out: if (primary_if) hardif_free_ref(primary_if); @@ -140,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv, curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); - if (curr_hard_iface) - hardif_free_ref(curr_hard_iface); - if (!new_hard_iface) - return; + goto out; + + bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface); + primary_if_update_addr(bat_priv, curr_hard_iface); - bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); - primary_if_update_addr(bat_priv); +out: + if (curr_hard_iface) + hardif_free_ref(curr_hard_iface); } static bool hardif_is_iface_up(const struct hard_iface *hard_iface) @@ -175,9 +173,9 @@ static void check_known_mac_addr(const struct net_device *net_dev) net_dev->dev_addr)) continue; - pr_warning("The newly added mac address (%pM) already exists on: %s\n", - net_dev->dev_addr, hard_iface->net_dev->name); - pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); + pr_warn("The newly added mac address (%pM) already exists on: %s\n", + net_dev->dev_addr, hard_iface->net_dev->name); + pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); } rcu_read_unlock(); } @@ -230,7 +228,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface) bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); hard_iface->if_status = IF_TO_BE_ACTIVATED; /** @@ -300,22 +298,17 @@ int hardif_enable_interface(struct hard_iface *hard_iface, if (!softif_is_valid(soft_iface)) { pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", soft_iface->name); - dev_put(soft_iface); ret = -EINVAL; - goto err; + goto err_dev; } hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_init(hard_iface); - - if (!hard_iface->packet_buff) { - bat_err(hard_iface->soft_iface, - "Can't add interface packet (%s): out of memory\n", - hard_iface->net_dev->name); + ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); + if (ret < 0) { ret = -ENOMEM; - goto err; + goto err_dev; } hard_iface->if_num = bat_priv->num_ifaces; @@ -328,7 +321,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface, hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); - atomic_set(&hard_iface->seqno, 1); atomic_set(&hard_iface->frag_seqno, 1); bat_info(hard_iface->soft_iface, "Adding interface: %s\n", hard_iface->net_dev->name); @@ -360,6 +352,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface, out: return 0; +err_dev: + dev_put(soft_iface); err: hardif_free_ref(hard_iface); return ret; @@ -394,8 +388,7 @@ void hardif_disable_interface(struct hard_iface *hard_iface) hardif_free_ref(new_if); } - kfree(hard_iface->packet_buff); - hard_iface->packet_buff = NULL; + bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); hard_iface->if_status = IF_NOT_IN_USE; /* delete all references to this hard_iface */ @@ -447,6 +440,13 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev) check_known_mac_addr(hard_iface->net_dev); list_add_tail_rcu(&hard_iface->list, &hardif_list); + /** + * This can't be called via a bat_priv callback 
because + * we have no bat_priv yet. + */ + atomic_set(&hard_iface->seqno, 1); + hard_iface->packet_buff = NULL; + return hard_iface; free_if: @@ -524,14 +524,14 @@ static int hard_if_event(struct notifier_block *this, check_known_mac_addr(hard_iface->net_dev); bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto hardif_put; if (hard_iface == primary_if) - primary_if_update_addr(bat_priv); + primary_if_update_addr(bat_priv, NULL); break; default: break; @@ -545,114 +545,6 @@ out: return NOTIFY_DONE; } -/* incoming packets with the batman ethertype received on any active hard - * interface */ -static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev) -{ - struct bat_priv *bat_priv; - struct batman_ogm_packet *batman_ogm_packet; - struct hard_iface *hard_iface; - int ret; - - hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); - skb = skb_share_check(skb, GFP_ATOMIC); - - /* skb was released by skb_share_check() */ - if (!skb) - goto err_out; - - /* packet should hold at least type and version */ - if (unlikely(!pskb_may_pull(skb, 2))) - goto err_free; - - /* expect a valid ethernet header here. */ - if (unlikely(skb->mac_len != sizeof(struct ethhdr) || - !skb_mac_header(skb))) - goto err_free; - - if (!hard_iface->soft_iface) - goto err_free; - - bat_priv = netdev_priv(hard_iface->soft_iface); - - if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) - goto err_free; - - /* discard frames on not active interfaces */ - if (hard_iface->if_status != IF_ACTIVE) - goto err_free; - - batman_ogm_packet = (struct batman_ogm_packet *)skb->data; - - if (batman_ogm_packet->header.version != COMPAT_VERSION) { - bat_dbg(DBG_BATMAN, bat_priv, - "Drop packet: incompatible batman version (%i)\n", - batman_ogm_packet->header.version); - goto err_free; - } - - /* all receive handlers return whether they received or reused - * the supplied skb. if not, we have to free the skb. */ - - switch (batman_ogm_packet->header.packet_type) { - /* batman originator packet */ - case BAT_OGM: - ret = recv_bat_ogm_packet(skb, hard_iface); - break; - - /* batman icmp packet */ - case BAT_ICMP: - ret = recv_icmp_packet(skb, hard_iface); - break; - - /* unicast packet */ - case BAT_UNICAST: - ret = recv_unicast_packet(skb, hard_iface); - break; - - /* fragmented unicast packet */ - case BAT_UNICAST_FRAG: - ret = recv_ucast_frag_packet(skb, hard_iface); - break; - - /* broadcast packet */ - case BAT_BCAST: - ret = recv_bcast_packet(skb, hard_iface); - break; - - /* vis packet */ - case BAT_VIS: - ret = recv_vis_packet(skb, hard_iface); - break; - /* Translation table query (request or response) */ - case BAT_TT_QUERY: - ret = recv_tt_query(skb, hard_iface); - break; - /* Roaming advertisement */ - case BAT_ROAM_ADV: - ret = recv_roam_adv(skb, hard_iface); - break; - default: - ret = NET_RX_DROP; - } - - if (ret == NET_RX_DROP) - kfree_skb(skb); - - /* return NET_RX_SUCCESS in any case as we - * most probably dropped the packet for - * routing-logical reasons. 
*/ - - return NET_RX_SUCCESS; - -err_free: - kfree_skb(skb); -err_out: - return NET_RX_DROP; -} - /* This function returns true if the interface represented by ifindex is a * 802.11 wireless device */ bool is_wifi_iface(int ifindex) diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index b87518edcef..2e98a57f340 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -175,13 +175,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (len >= sizeof(struct icmp_packet_rr)) packet_len = sizeof(struct icmp_packet_rr); - skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); + skb = dev_alloc_skb(packet_len + ETH_HLEN); if (!skb) { len = -ENOMEM; goto out; } - skb_reserve(skb, sizeof(struct ethhdr)); + skb_reserve(skb, ETH_HLEN); icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); if (copy_from_user(icmp_packet, buff, packet_len)) { diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 6d51caaf8ce..083a2993efe 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -30,6 +30,7 @@ #include "translation-table.h" #include "hard-interface.h" #include "gateway_client.h" +#include "bridge_loop_avoidance.h" #include "vis.h" #include "hash.h" #include "bat_algo.h" @@ -38,6 +39,7 @@ /* List manipulations on hardif_list have to be rtnl_lock()'ed, * list traversals just rcu-locked */ struct list_head hardif_list; +static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *); char bat_routing_algo[20] = "BATMAN IV"; static struct hlist_head bat_algo_list; @@ -45,11 +47,15 @@ unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct workqueue_struct *bat_event_workqueue; +static void recv_handler_init(void); + static int __init batman_init(void) { INIT_LIST_HEAD(&hardif_list); INIT_HLIST_HEAD(&bat_algo_list); + recv_handler_init(); + bat_iv_init(); /* the name should not be longer than 10 chars - see @@ -96,13 +102,10 @@ int mesh_init(struct net_device *soft_iface) spin_lock_init(&bat_priv->gw_list_lock); spin_lock_init(&bat_priv->vis_hash_lock); spin_lock_init(&bat_priv->vis_list_lock); - spin_lock_init(&bat_priv->softif_neigh_lock); - spin_lock_init(&bat_priv->softif_neigh_vid_lock); INIT_HLIST_HEAD(&bat_priv->forw_bat_list); INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); INIT_HLIST_HEAD(&bat_priv->gw_list); - INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); INIT_LIST_HEAD(&bat_priv->tt_changes_list); INIT_LIST_HEAD(&bat_priv->tt_req_list); INIT_LIST_HEAD(&bat_priv->tt_roam_list); @@ -118,6 +121,9 @@ int mesh_init(struct net_device *soft_iface) if (vis_init(bat_priv) < 1) goto err; + if (bla_init(bat_priv) < 1) + goto err; + atomic_set(&bat_priv->gw_reselect, 0); atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); goto end; @@ -145,7 +151,7 @@ void mesh_free(struct net_device *soft_iface) tt_free(bat_priv); - softif_neigh_purge(bat_priv); + bla_free(bat_priv); atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); } @@ -178,6 +184,120 @@ int is_my_mac(const uint8_t *addr) return 0; } +static int recv_unhandled_packet(struct sk_buff *skb, + struct hard_iface *recv_if) +{ + return NET_RX_DROP; +} + +/* incoming packets with the batman ethertype received on any active hard + * interface + */ +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) +{ + struct bat_priv *bat_priv; + struct batman_ogm_packet *batman_ogm_packet; + struct hard_iface *hard_iface; + uint8_t idx; + int ret; + + hard_iface = 
container_of(ptype, struct hard_iface, batman_adv_ptype); + skb = skb_share_check(skb, GFP_ATOMIC); + + /* skb was released by skb_share_check() */ + if (!skb) + goto err_out; + + /* packet should hold at least type and version */ + if (unlikely(!pskb_may_pull(skb, 2))) + goto err_free; + + /* expect a valid ethernet header here. */ + if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) + goto err_free; + + if (!hard_iface->soft_iface) + goto err_free; + + bat_priv = netdev_priv(hard_iface->soft_iface); + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto err_free; + + /* discard frames on not active interfaces */ + if (hard_iface->if_status != IF_ACTIVE) + goto err_free; + + batman_ogm_packet = (struct batman_ogm_packet *)skb->data; + + if (batman_ogm_packet->header.version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_ogm_packet->header.version); + goto err_free; + } + + /* all receive handlers return whether they received or reused + * the supplied skb. if not, we have to free the skb. + */ + idx = batman_ogm_packet->header.packet_type; + ret = (*recv_packet_handler[idx])(skb, hard_iface); + + if (ret == NET_RX_DROP) + kfree_skb(skb); + + /* return NET_RX_SUCCESS in any case as we + * most probably dropped the packet for + * routing-logical reasons. + */ + return NET_RX_SUCCESS; + +err_free: + kfree_skb(skb); +err_out: + return NET_RX_DROP; +} + +static void recv_handler_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++) + recv_packet_handler[i] = recv_unhandled_packet; + + /* batman icmp packet */ + recv_packet_handler[BAT_ICMP] = recv_icmp_packet; + /* unicast packet */ + recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; + /* fragmented unicast packet */ + recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet; + /* broadcast packet */ + recv_packet_handler[BAT_BCAST] = recv_bcast_packet; + /* vis packet */ + recv_packet_handler[BAT_VIS] = recv_vis_packet; + /* Translation table query (request or response) */ + recv_packet_handler[BAT_TT_QUERY] = recv_tt_query; + /* Roaming advertisement */ + recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv; +} + +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)) +{ + if (recv_packet_handler[packet_type] != &recv_unhandled_packet) + return -EBUSY; + + recv_packet_handler[packet_type] = recv_handler; + return 0; +} + +void recv_handler_unregister(uint8_t packet_type) +{ + recv_packet_handler[packet_type] = recv_unhandled_packet; +} + static struct bat_algo_ops *bat_algo_get(char *name) { struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; @@ -207,12 +327,12 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops) } /* all algorithms must implement all ops (for now) */ - if (!bat_algo_ops->bat_ogm_init || - !bat_algo_ops->bat_ogm_init_primary || - !bat_algo_ops->bat_ogm_update_mac || + if (!bat_algo_ops->bat_iface_enable || + !bat_algo_ops->bat_iface_disable || + !bat_algo_ops->bat_iface_update_mac || + !bat_algo_ops->bat_primary_iface_set || !bat_algo_ops->bat_ogm_schedule || - !bat_algo_ops->bat_ogm_emit || - !bat_algo_ops->bat_ogm_receive) { + !bat_algo_ops->bat_ogm_emit) { pr_info("Routing algo '%s' does not implement required ops\n", bat_algo_ops->name); goto out; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 94fa1c2393a..f4a3ec00347 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -28,7 +28,7 @@ #define 
DRIVER_DEVICE "batman-adv" #ifndef SOURCE_VERSION -#define SOURCE_VERSION "2012.1.0" +#define SOURCE_VERSION "2012.2.0" #endif /* B.A.T.M.A.N. parameters */ @@ -65,7 +65,7 @@ #define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ -#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) +#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE) #define LOG_BUF_LEN 8192 /* has to be a power of 2 */ @@ -80,8 +80,12 @@ #define MAX_AGGREGATION_BYTES 512 #define MAX_AGGREGATION_MS 100 -#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ +#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */ +#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) +#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) +#define DUPLIST_SIZE 16 +#define DUPLIST_TIMEOUT 500 /* 500 ms */ /* don't reset again within 30 seconds */ #define RESET_PROTECTION_MS 30000 #define EXPECTED_SEQNO_RANGE 65536 @@ -119,7 +123,8 @@ enum dbg_level { DBG_BATMAN = 1 << 0, DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ DBG_TT = 1 << 2, /* translation table operations */ - DBG_ALL = 7 + DBG_BLA = 1 << 3, /* bridge loop avoidance */ + DBG_ALL = 15 }; /* Kernel headers */ @@ -150,6 +155,12 @@ void mesh_free(struct net_device *soft_iface); void inc_module_count(void); void dec_module_count(void); int is_my_mac(const uint8_t *addr); +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev); +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)); +void recv_handler_unregister(uint8_t packet_type); int bat_algo_register(struct bat_algo_ops *bat_algo_ops); int bat_algo_select(struct bat_priv *bat_priv, char *name); int bat_algo_seq_print_text(struct seq_file *seq, void *offset); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 43c0a4f1399..41147942ba5 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -28,13 +28,15 @@ #include "hard-interface.h" #include "unicast.h" #include "soft-interface.h" +#include "bridge_loop_avoidance.h" static void purge_orig(struct work_struct *work); static void start_purge_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); - queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); + queue_delayed_work(bat_event_workqueue, + &bat_priv->orig_work, msecs_to_jiffies(1000)); } /* returns 1 if they are the same originator */ @@ -83,35 +85,30 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node) return router; } -struct neigh_node *create_neighbor(struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const uint8_t *neigh, - struct hard_iface *if_incoming) +struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + uint32_t seqno) { - struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct neigh_node *neigh_node; - bat_dbg(DBG_BATMAN, bat_priv, - "Creating new last-hop neighbor of originator\n"); - neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); if (!neigh_node) - return NULL; + goto out; INIT_HLIST_NODE(&neigh_node->list); - INIT_LIST_HEAD(&neigh_node->bonding_list); - spin_lock_init(&neigh_node->tq_lock); - memcpy(neigh_node->addr, neigh, ETH_ALEN); - neigh_node->orig_node = orig_neigh_node; - neigh_node->if_incoming = if_incoming; + memcpy(neigh_node->addr, neigh_addr, ETH_ALEN); + 
spin_lock_init(&neigh_node->lq_update_lock); /* extra reference for return */ atomic_set(&neigh_node->refcount, 2); - spin_lock_bh(&orig_node->neigh_list_lock); - hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); - spin_unlock_bh(&orig_node->neigh_list_lock); + bat_dbg(DBG_BATMAN, bat_priv, + "Creating new neighbor %pM, initial seqno %d\n", + neigh_addr, seqno); + +out: return neigh_node; } @@ -273,6 +270,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, struct hlist_node *node, *node_tmp; struct neigh_node *neigh_node; bool neigh_purged = false; + unsigned long last_seen; *best_neigh_node = NULL; @@ -282,11 +280,13 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, hlist_for_each_entry_safe(neigh_node, node, node_tmp, &orig_node->neigh_list, list) { - if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) || + if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) || (neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { + last_seen = neigh_node->last_seen; + if ((neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == @@ -299,9 +299,9 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, neigh_node->if_incoming->net_dev->name); else bat_dbg(DBG_BATMAN, bat_priv, - "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n", + "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", orig_node->orig, neigh_node->addr, - (neigh_node->last_valid / HZ)); + jiffies_to_msecs(last_seen)); neigh_purged = true; @@ -324,10 +324,11 @@ static bool purge_orig_node(struct bat_priv *bat_priv, { struct neigh_node *best_neigh_node; - if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) { + if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) { bat_dbg(DBG_BATMAN, bat_priv, - "Originator timeout: originator %pM, last_valid %lu\n", - orig_node->orig, (orig_node->last_valid / HZ)); + "Originator timeout: originator %pM, last_seen %u\n", + orig_node->orig, + jiffies_to_msecs(orig_node->last_seen)); return true; } else { if (purge_orig_neighbors(bat_priv, orig_node, @@ -375,8 +376,6 @@ static void _purge_orig(struct bat_priv *bat_priv) gw_node_purge(bat_priv); gw_election(bat_priv); - - softif_neigh_purge(bat_priv); } static void purge_orig(struct work_struct *work) @@ -447,9 +446,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) goto next; last_seen_secs = jiffies_to_msecs(jiffies - - orig_node->last_valid) / 1000; + orig_node->last_seen) / 1000; last_seen_msecs = jiffies_to_msecs(jiffies - - orig_node->last_valid) % 1000; + orig_node->last_seen) % 1000; seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", orig_node->orig, last_seen_secs, diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 3fe2eda8565..f74d0d69335 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -29,10 +29,9 @@ void originator_free(struct bat_priv *bat_priv); void purge_orig_ref(struct bat_priv *bat_priv); void orig_node_free_ref(struct orig_node *orig_node); struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); -struct neigh_node *create_neighbor(struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const uint8_t *neigh, - struct hard_iface *if_incoming); +struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + uint32_t seqno); void 
neigh_node_free_ref(struct neigh_node *neigh_node); struct neigh_node *orig_node_get_router(struct orig_node *orig_node); int orig_seq_print_text(struct seq_file *seq, void *offset); diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 441f3db1bd9..0ee1af77079 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -25,7 +25,7 @@ #define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ enum bat_packettype { - BAT_OGM = 0x01, + BAT_IV_OGM = 0x01, BAT_ICMP = 0x02, BAT_UNICAST = 0x03, BAT_BCAST = 0x04, @@ -38,7 +38,8 @@ enum bat_packettype { /* this file is included by batctl which needs these defines */ #define COMPAT_VERSION 14 -enum batman_flags { +enum batman_iv_flags { + NOT_BEST_NEXT_HOP = 1 << 3, PRIMARIES_FIRST_HOP = 1 << 4, VIS_SERVER = 1 << 5, DIRECTLINK = 1 << 6 @@ -90,6 +91,23 @@ enum tt_client_flags { TT_CLIENT_PENDING = 1 << 10 }; +/* claim frame types for the bridge loop avoidance */ +enum bla_claimframe { + CLAIM_TYPE_ADD = 0x00, + CLAIM_TYPE_DEL = 0x01, + CLAIM_TYPE_ANNOUNCE = 0x02, + CLAIM_TYPE_REQUEST = 0x03 +}; + +/* the destination hardware field in the ARP frame is used to + * transport the claim type and the group id + */ +struct bla_claim_dst { + uint8_t magic[3]; /* FF:43:05 */ + uint8_t type; /* bla_claimframe */ + uint16_t group; /* group id */ +} __packed; + struct batman_header { uint8_t packet_type; uint8_t version; /* batman version field */ @@ -100,8 +118,8 @@ struct batman_ogm_packet { struct batman_header header; uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ uint32_t seqno; - uint8_t orig[6]; - uint8_t prev_sender[6]; + uint8_t orig[ETH_ALEN]; + uint8_t prev_sender[ETH_ALEN]; uint8_t gw_flags; /* flags related to gateway class */ uint8_t tq; uint8_t tt_num_changes; @@ -109,13 +127,13 @@ struct batman_ogm_packet { uint16_t tt_crc; } __packed; -#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) +#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet) struct icmp_packet { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ - uint8_t dst[6]; - uint8_t orig[6]; + uint8_t dst[ETH_ALEN]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; uint8_t uid; uint8_t reserved; @@ -128,8 +146,8 @@ struct icmp_packet { struct icmp_packet_rr { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ - uint8_t dst[6]; - uint8_t orig[6]; + uint8_t dst[ETH_ALEN]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; uint8_t uid; uint8_t rr_cur; @@ -139,16 +157,16 @@ struct icmp_packet_rr { struct unicast_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ - uint8_t dest[6]; + uint8_t dest[ETH_ALEN]; } __packed; struct unicast_frag_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ - uint8_t dest[6]; + uint8_t dest[ETH_ALEN]; uint8_t flags; uint8_t align; - uint8_t orig[6]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; } __packed; @@ -156,7 +174,7 @@ struct bcast_packet { struct batman_header header; uint8_t reserved; uint32_t seqno; - uint8_t orig[6]; + uint8_t orig[ETH_ALEN]; } __packed; struct vis_packet { @@ -165,9 +183,9 @@ struct vis_packet { uint32_t seqno; /* sequence number */ uint8_t entries; /* number of entries behind this struct */ uint8_t reserved; - uint8_t vis_orig[6]; /* originator that announces its neighbors */ - uint8_t target_orig[6]; /* who should receive this packet */ - uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ + uint8_t 
vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ + uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */ + uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ } __packed; struct tt_query_packet { diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 7f8e1589941..015471d801b 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -29,6 +29,10 @@ #include "originator.h" #include "vis.h" #include "unicast.h" +#include "bridge_loop_avoidance.h" + +static int route_unicast_packet(struct sk_buff *skb, + struct hard_iface *recv_if); void slide_own_bcast_window(struct hard_iface *hard_iface) { @@ -52,7 +56,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) bit_get_packet(bat_priv, word, 1, 0); orig_node->bcast_own_sum[hard_iface->if_num] = - bit_packet_count(word); + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->ogm_cnt_lock); } rcu_read_unlock(); @@ -230,51 +234,46 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, { if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { - if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) { - - *last_reset = jiffies; - bat_dbg(DBG_BATMAN, bat_priv, - "old packet received, start protection\n"); - - return 0; - } else { + if (!has_timed_out(*last_reset, RESET_PROTECTION_MS)) return 1; - } + + *last_reset = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "old packet received, start protection\n"); } + return 0; } -int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len) { - struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ - if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) - return NET_RX_DROP; + if (unlikely(!pskb_may_pull(skb, header_len))) + return false; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with broadcast indication but unicast recipient */ if (!is_broadcast_ether_addr(ethhdr->h_dest)) - return NET_RX_DROP; + return false; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) - return NET_RX_DROP; + return false; /* create a copy of the skb, if needed, to modify it. */ if (skb_cow(skb, 0) < 0) - return NET_RX_DROP; + return false; /* keep skb linear */ if (skb_linearize(skb) < 0) - return NET_RX_DROP; - - bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb); + return false; - kfree_skb(skb); - return NET_RX_SUCCESS; + return true; } static int recv_my_icmp_packet(struct bat_priv *bat_priv, @@ -309,7 +308,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -364,7 +363,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet *)skb->data; @@ -450,7 +449,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* create a copy of the skb, if needed, to modify it. 
*/ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -618,6 +617,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) * changes */ if (skb_linearize(skb) < 0) goto out; + /* skb_linearize() possibly changed skb->data */ + tt_query = (struct tt_query_packet *)skb->data; tt_len = tt_query->tt_data * sizeof(struct tt_change); @@ -669,6 +670,13 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) if (!is_my_mac(roam_adv_packet->dst)) return route_unicast_packet(skb, recv_if); + /* check if it is a backbone gateway. we don't accept + * roaming advertisement from it, as it has the same + * entries as we have. + */ + if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) + goto out; + orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); if (!orig_node) goto out; @@ -798,7 +806,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) return 0; } -int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; @@ -830,7 +838,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; unicast_packet = (struct unicast_packet *)skb->data; @@ -907,12 +915,20 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, /* Check whether I have to reroute the packet */ if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { - /* Linearize the skb before accessing it */ - if (skb_linearize(skb) < 0) + /* check if there is enough data before accessing it */ + if (pskb_may_pull(skb, sizeof(struct unicast_packet) + + ETH_HLEN) < 0) return 0; ethhdr = (struct ethhdr *)(skb->data + sizeof(struct unicast_packet)); + + /* we don't have an updated route for this client, so we should + * not try to reroute the packet!! + */ + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + return 1; + orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); if (!orig_node) { @@ -1047,8 +1063,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_lock_bh(&orig_node->bcast_seqno_lock); /* check whether the packet is a duplicate */ - if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, - ntohl(bcast_packet->seqno))) + if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno, + ntohl(bcast_packet->seqno))) goto spin_unlock; seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; @@ -1065,9 +1081,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_unlock_bh(&orig_node->bcast_seqno_lock); + /* check whether this has been sent by another originator before */ + if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) + goto out; + /* rebroadcast packet */ add_bcast_packet_to_list(bat_priv, skb, 1); + /* don't hand the broadcast up if it is from an originator + * from the same backbone. 
+ */ + if (bla_is_backbone_gw(skb, orig_node, hdr_size)) + goto out; + /* broadcast for me */ interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); ret = NET_RX_SUCCESS; diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 92ac100d83d..d6bbbebb656 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -23,15 +23,16 @@ #define _NET_BATMAN_ADV_ROUTING_H_ void slide_own_bcast_window(struct hard_iface *hard_iface); +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len); void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node); -int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); struct neigh_node *find_router(struct bat_priv *bat_priv, diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index af7a6741a68..f47299f22c6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -45,13 +45,13 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, goto send_skb_err; if (!(hard_iface->net_dev->flags & IFF_UP)) { - pr_warning("Interface %s is not up - can't send packet via that interface!\n", - hard_iface->net_dev->name); + pr_warn("Interface %s is not up - can't send packet via that interface!\n", + hard_iface->net_dev->name); goto send_skb_err; } /* push to the ethernet header. 
*/ - if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0) + if (my_skb_head_push(skb, ETH_HLEN) < 0) goto send_skb_err; skb_reset_mac_header(skb); @@ -87,7 +87,7 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface, /* keep old buffer if kmalloc should fail */ if (new_buff) { memcpy(new_buff, hard_iface->packet_buff, - BATMAN_OGM_LEN); + BATMAN_OGM_HLEN); kfree(hard_iface->packet_buff); hard_iface->packet_buff = new_buff; @@ -101,13 +101,13 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv, { int new_len; - new_len = BATMAN_OGM_LEN + + new_len = BATMAN_OGM_HLEN + tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); /* if we have too many changes for one packet don't send any * and wait for the tt table request which will be fragmented */ if (new_len > hard_iface->soft_iface->mtu) - new_len = BATMAN_OGM_LEN; + new_len = BATMAN_OGM_HLEN; realloc_packet_buffer(hard_iface, new_len); @@ -117,14 +117,14 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv, atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); return tt_changes_fill_buffer(bat_priv, - hard_iface->packet_buff + BATMAN_OGM_LEN, - hard_iface->packet_len - BATMAN_OGM_LEN); + hard_iface->packet_buff + BATMAN_OGM_HLEN, + hard_iface->packet_len - BATMAN_OGM_HLEN); } static int reset_packet_buffer(struct bat_priv *bat_priv, struct hard_iface *hard_iface) { - realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN); + realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN); return 0; } @@ -292,7 +292,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work) /* if we still have some more bcasts to send */ if (forw_packet->num_packets < 3) { _add_bcast_packet_to_list(bat_priv, forw_packet, - ((5 * HZ) / 1000)); + msecs_to_jiffies(5)); return; } diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index a5590f4193f..a0ec0e4ada4 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -36,6 +36,7 @@ #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include "unicast.h" +#include "bridge_loop_avoidance.h" static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); @@ -73,439 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len) return 0; } -static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) -{ - if (atomic_dec_and_test(&softif_neigh->refcount)) - kfree_rcu(softif_neigh, rcu); -} - -static void softif_neigh_vid_free_rcu(struct rcu_head *rcu) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh; - struct hlist_node *node, *node_tmp; - struct bat_priv *bat_priv; - - softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu); - bat_priv = softif_neigh_vid->bat_priv; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_for_each_entry_safe(softif_neigh, node, node_tmp, - &softif_neigh_vid->softif_neigh_list, list) { - hlist_del_rcu(&softif_neigh->list); - softif_neigh_free_ref(softif_neigh); - } - spin_unlock_bh(&bat_priv->softif_neigh_lock); - - kfree(softif_neigh_vid); -} - -static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid) -{ - if (atomic_dec_and_test(&softif_neigh_vid->refcount)) - call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu); -} - -static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct hlist_node *node; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, 
list) { - if (softif_neigh_vid->vid != vid) - continue; - - if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) - continue; - - goto out; - } - - softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC); - if (!softif_neigh_vid) - goto out; - - softif_neigh_vid->vid = vid; - softif_neigh_vid->bat_priv = bat_priv; - - /* initialize with 2 - caller decrements counter by one */ - atomic_set(&softif_neigh_vid->refcount, 2); - INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list); - INIT_HLIST_NODE(&softif_neigh_vid->list); - spin_lock_bh(&bat_priv->softif_neigh_vid_lock); - hlist_add_head_rcu(&softif_neigh_vid->list, - &bat_priv->softif_neigh_vids); - spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); - -out: - rcu_read_unlock(); - return softif_neigh_vid; -} - -static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, - const uint8_t *addr, short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh = NULL; - struct hlist_node *node; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh, node, - &softif_neigh_vid->softif_neigh_list, - list) { - if (!compare_eth(softif_neigh->addr, addr)) - continue; - - if (!atomic_inc_not_zero(&softif_neigh->refcount)) - continue; - - softif_neigh->last_seen = jiffies; - goto unlock; - } - - softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC); - if (!softif_neigh) - goto unlock; - - memcpy(softif_neigh->addr, addr, ETH_ALEN); - softif_neigh->last_seen = jiffies; - /* initialize with 2 - caller decrements counter by one */ - atomic_set(&softif_neigh->refcount, 2); - - INIT_HLIST_NODE(&softif_neigh->list); - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_add_head_rcu(&softif_neigh->list, - &softif_neigh_vid->softif_neigh_list); - spin_unlock_bh(&bat_priv->softif_neigh_lock); - -unlock: - rcu_read_unlock(); -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); - return softif_neigh; -} - -static struct softif_neigh *softif_neigh_get_selected( - struct softif_neigh_vid *softif_neigh_vid) -{ - struct softif_neigh *softif_neigh; - - rcu_read_lock(); - softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); - - if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount)) - softif_neigh = NULL; - - rcu_read_unlock(); - return softif_neigh; -} - -static struct softif_neigh *softif_neigh_vid_get_selected( - struct bat_priv *bat_priv, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh = NULL; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - softif_neigh = softif_neigh_get_selected(softif_neigh_vid); -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); - return softif_neigh; -} - -static void softif_neigh_vid_select(struct bat_priv *bat_priv, - struct softif_neigh *new_neigh, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *curr_neigh; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - - if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) - new_neigh = NULL; - - curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh, - 1); - rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh); - - if ((curr_neigh) && (!new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Removing mesh exit point on vid: 
%d (prev: %pM).\n", - vid, curr_neigh->addr); - else if ((curr_neigh) && (new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Changing mesh exit point on vid: %d from %pM to %pM.\n", - vid, curr_neigh->addr, new_neigh->addr); - else if ((!curr_neigh) && (new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Setting mesh exit point on vid: %d to %pM.\n", - vid, new_neigh->addr); - - if (curr_neigh) - softif_neigh_free_ref(curr_neigh); - - spin_unlock_bh(&bat_priv->softif_neigh_lock); - -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); -} - -static void softif_neigh_vid_deselect(struct bat_priv *bat_priv, - struct softif_neigh_vid *softif_neigh_vid) -{ - struct softif_neigh *curr_neigh; - struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp; - struct hard_iface *primary_if = NULL; - struct hlist_node *node; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* find new softif_neigh immediately to avoid temporary loops */ - rcu_read_lock(); - curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); - - hlist_for_each_entry_rcu(softif_neigh_tmp, node, - &softif_neigh_vid->softif_neigh_list, - list) { - if (softif_neigh_tmp == curr_neigh) - continue; - - /* we got a neighbor but its mac is 'bigger' than ours */ - if (memcmp(primary_if->net_dev->dev_addr, - softif_neigh_tmp->addr, ETH_ALEN) < 0) - continue; - - if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount)) - continue; - - softif_neigh = softif_neigh_tmp; - goto unlock; - } - -unlock: - rcu_read_unlock(); -out: - softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid); - - if (primary_if) - hardif_free_ref(primary_if); - if (softif_neigh) - softif_neigh_free_ref(softif_neigh); -} - -int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) -{ - struct net_device *net_dev = (struct net_device *)seq->private; - struct bat_priv *bat_priv = netdev_priv(net_dev); - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh; - struct hard_iface *primary_if; - struct hlist_node *node, *node_tmp; - struct softif_neigh *curr_softif_neigh; - int ret = 0, last_seen_secs, last_seen_msecs; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) { - ret = seq_printf(seq, - "BATMAN mesh %s disabled - please specify interfaces to enable it\n", - net_dev->name); - goto out; - } - - if (primary_if->if_status != IF_ACTIVE) { - ret = seq_printf(seq, - "BATMAN mesh %s disabled - primary interface not active\n", - net_dev->name); - goto out; - } - - seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - seq_printf(seq, " %-15s %s on vid: %d\n", - "Originator", "last-seen", softif_neigh_vid->vid); - - curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); - - hlist_for_each_entry_rcu(softif_neigh, node_tmp, - &softif_neigh_vid->softif_neigh_list, - list) { - last_seen_secs = jiffies_to_msecs(jiffies - - softif_neigh->last_seen) / 1000; - last_seen_msecs = jiffies_to_msecs(jiffies - - softif_neigh->last_seen) % 1000; - seq_printf(seq, "%s %pM %3i.%03is\n", - curr_softif_neigh == softif_neigh - ? 
"=>" : " ", softif_neigh->addr, - last_seen_secs, last_seen_msecs); - } - - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - - seq_printf(seq, "\n"); - } - rcu_read_unlock(); - -out: - if (primary_if) - hardif_free_ref(primary_if); - return ret; -} - -void softif_neigh_purge(struct bat_priv *bat_priv) -{ - struct softif_neigh *softif_neigh, *curr_softif_neigh; - struct softif_neigh_vid *softif_neigh_vid; - struct hlist_node *node, *node_tmp, *node_tmp2; - int do_deselect; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) - continue; - - curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); - do_deselect = 0; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2, - &softif_neigh_vid->softif_neigh_list, - list) { - if ((!has_timed_out(softif_neigh->last_seen, - SOFTIF_NEIGH_TIMEOUT)) && - (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) - continue; - - if (curr_softif_neigh == softif_neigh) { - bat_dbg(DBG_ROUTES, bat_priv, - "Current mesh exit point on vid: %d '%pM' vanished.\n", - softif_neigh_vid->vid, - softif_neigh->addr); - do_deselect = 1; - } - - hlist_del_rcu(&softif_neigh->list); - softif_neigh_free_ref(softif_neigh); - } - spin_unlock_bh(&bat_priv->softif_neigh_lock); - - /* soft_neigh_vid_deselect() needs to acquire the - * softif_neigh_lock */ - if (do_deselect) - softif_neigh_vid_deselect(bat_priv, softif_neigh_vid); - - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - - softif_neigh_vid_free_ref(softif_neigh_vid); - } - rcu_read_unlock(); - - spin_lock_bh(&bat_priv->softif_neigh_vid_lock); - hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp, - &bat_priv->softif_neigh_vids, list) { - if (!hlist_empty(&softif_neigh_vid->softif_neigh_list)) - continue; - - hlist_del_rcu(&softif_neigh_vid->list); - softif_neigh_vid_free_ref(softif_neigh_vid); - } - spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); - -} - -static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, - short vid) -{ - struct bat_priv *bat_priv = netdev_priv(dev); - struct ethhdr *ethhdr = (struct ethhdr *)skb->data; - struct batman_ogm_packet *batman_ogm_packet; - struct softif_neigh *softif_neigh = NULL; - struct hard_iface *primary_if = NULL; - struct softif_neigh *curr_softif_neigh = NULL; - - if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) - batman_ogm_packet = (struct batman_ogm_packet *) - (skb->data + ETH_HLEN + VLAN_HLEN); - else - batman_ogm_packet = (struct batman_ogm_packet *) - (skb->data + ETH_HLEN); - - if (batman_ogm_packet->header.version != COMPAT_VERSION) - goto out; - - if (batman_ogm_packet->header.packet_type != BAT_OGM) - goto out; - - if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) - goto out; - - if (is_my_mac(batman_ogm_packet->orig)) - goto out; - - softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid); - if (!softif_neigh) - goto out; - - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh == softif_neigh) - goto out; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* we got a neighbor but its mac is 'bigger' than ours */ - if (memcmp(primary_if->net_dev->dev_addr, - softif_neigh->addr, ETH_ALEN) < 0) - goto out; - - /* close own batX device and use softif_neigh as exit node */ - if (!curr_softif_neigh) { - softif_neigh_vid_select(bat_priv, 
softif_neigh, vid); - goto out; - } - - /* switch to new 'smallest neighbor' */ - if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0) - softif_neigh_vid_select(bat_priv, softif_neigh, vid); - -out: - kfree_skb(skb); - if (softif_neigh) - softif_neigh_free_ref(softif_neigh); - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - if (primary_if) - hardif_free_ref(primary_if); - return; -} - static int interface_open(struct net_device *dev) { netif_start_queue(dev); @@ -562,10 +130,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct hard_iface *primary_if = NULL; struct bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; - struct softif_neigh *curr_softif_neigh = NULL; + static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, + 0x00}; unsigned int header_len = 0; int data_len = skb->len, ret; - short vid = -1; + short vid __maybe_unused = -1; bool do_bcast = false; if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) @@ -583,21 +152,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* fall through */ case ETH_P_BATMAN: - softif_batman_recv(skb, soft_iface, vid); - goto end; + goto dropped; } - /** - * if we have a another chosen mesh exit node in range - * it will transport the packets to the mesh - */ - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh) + if (bla_tx(bat_priv, skb, vid)) goto dropped; /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); + /* don't accept stp packets. STP does not help in meshes. + * better use the bridge loop avoidance ... + */ + if (compare_eth(ethhdr->h_dest, stp_addr)) + goto dropped; + if (is_multicast_ether_addr(ethhdr->h_dest)) { do_bcast = true; @@ -675,8 +244,6 @@ dropped: dropped_freed: bat_priv->stats.tx_dropped++; end: - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); if (primary_if) hardif_free_ref(primary_if); return NETDEV_TX_OK; @@ -687,12 +254,13 @@ void interface_rx(struct net_device *soft_iface, int hdr_size) { struct bat_priv *bat_priv = netdev_priv(soft_iface); - struct unicast_packet *unicast_packet; struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; - struct softif_neigh *curr_softif_neigh = NULL; - short vid = -1; - int ret; + struct batman_header *batadv_header = (struct batman_header *)skb->data; + short vid __maybe_unused = -1; + bool is_bcast; + + is_bcast = (batadv_header->packet_type == BAT_BCAST); /* check if enough space is available for pulling, and pull */ if (!pskb_may_pull(skb, hdr_size)) @@ -716,30 +284,6 @@ void interface_rx(struct net_device *soft_iface, goto dropped; } - /** - * if we have a another chosen mesh exit node in range - * it will transport the packets to the non-mesh network - */ - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh) { - skb_push(skb, hdr_size); - unicast_packet = (struct unicast_packet *)skb->data; - - if ((unicast_packet->header.packet_type != BAT_UNICAST) && - (unicast_packet->header.packet_type != BAT_UNICAST_FRAG)) - goto dropped; - - skb_reset_mac_header(skb); - - memcpy(unicast_packet->dest, - curr_softif_neigh->addr, ETH_ALEN); - ret = route_unicast_packet(skb, recv_if); - if (ret == NET_RX_DROP) - goto dropped; - - goto out; - } - /* skb->dev & skb->pkt_type are set here */ if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) goto dropped; @@ -752,21 +296,25 @@ void interface_rx(struct net_device *soft_iface, /* 
skb->ip_summed = CHECKSUM_UNNECESSARY;*/ bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); + bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; soft_iface->last_rx = jiffies; if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped; + /* Let the bridge loop avoidance check the packet. If it will + * not handle it, we can safely push it up. + */ + if (bla_rx(bat_priv, skb, vid, is_bcast)) + goto out; + netif_rx(skb); goto out; dropped: kfree_skb(skb); out: - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); return; } @@ -828,13 +376,14 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); + atomic_set(&bat_priv->bridge_loop_avoidance, 0); atomic_set(&bat_priv->ap_isolation, 0); atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); atomic_set(&bat_priv->gw_sel_class, 20); atomic_set(&bat_priv->gw_bandwidth, 41); atomic_set(&bat_priv->orig_interval, 1000); - atomic_set(&bat_priv->hop_penalty, 10); + atomic_set(&bat_priv->hop_penalty, 30); atomic_set(&bat_priv->log_level, 0); atomic_set(&bat_priv->fragmentation, 1); atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); @@ -845,6 +394,7 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->ttvn, 0); atomic_set(&bat_priv->tt_local_changes, 0); atomic_set(&bat_priv->tt_ogm_append_cnt, 0); + atomic_set(&bat_priv->bla_num_requests, 0); bat_priv->tt_buff = NULL; bat_priv->tt_buff_len = 0; diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 756eab5b8dd..02030067388 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -23,8 +23,6 @@ #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ int my_skb_head_push(struct sk_buff *skb, unsigned int len); -int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); -void softif_neigh_purge(struct bat_priv *bat_priv); void interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct hard_iface *recv_if, int hdr_size); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 1f869212784..2ab83d7fb1f 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * - * Marek Lindner, Simon Wunderlich + * Marek Lindner, Simon Wunderlich, Antonio Quartulli * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -27,13 +27,14 @@ #include "hash.h" #include "originator.h" #include "routing.h" +#include "bridge_loop_avoidance.h" #include <linux/crc16.h> -static void _tt_global_del(struct bat_priv *bat_priv, - struct tt_global_entry *tt_global_entry, - const char *message); +static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node); static void tt_purge(struct work_struct *work); +static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry); /* returns 1 if they are the same mac addr */ static int compare_tt(const struct hlist_node *node, const void *data2) @@ -123,17 +124,32 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (tt_global_entry->orig_node) - orig_node_free_ref(tt_global_entry->orig_node); - kfree(tt_global_entry); } static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) { - if (atomic_dec_and_test(&tt_global_entry->common.refcount)) + if (atomic_dec_and_test(&tt_global_entry->common.refcount)) { + tt_global_del_orig_list(tt_global_entry); call_rcu(&tt_global_entry->common.rcu, tt_global_entry_free_rcu); + } +} + +static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu) +{ + struct tt_orig_list_entry *orig_entry; + + orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu); + orig_node_free_ref(orig_entry->orig_node); + kfree(orig_entry); +} + +static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry) +{ + /* to avoid race conditions, immediately decrease the tt counter */ + atomic_dec(&orig_entry->orig_node->tt_size); + call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu); } static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, @@ -182,12 +198,17 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; int hash_added; tt_local_entry = tt_local_hash_find(bat_priv, addr); if (tt_local_entry) { tt_local_entry->last_seen = jiffies; + /* possibly unset the TT_CLIENT_PENDING flag */ + tt_local_entry->common.flags &= ~TT_CLIENT_PENDING; goto out; } @@ -232,14 +253,21 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* Check whether it is a roaming! 
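 *
 * ((ed.) A note on the lookup idiom this patch uses again and again:
 * entries are walked under rcu_read_lock() and only handed out once a
 * reference has been taken with atomic_inc_not_zero(), so an entry
 * whose refcount has already hit zero is skipped rather than revived.
 * A minimal generic sketch -- match(), entry and head are stand-ins,
 * not symbols from this patch:
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(entry, node, head, list) {
 *		if (!match(entry))
 *			continue;
 *		if (!atomic_inc_not_zero(&entry->refcount))
 *			continue;	(being freed; skip it)
 *		goto unlock;		(caller now owns one reference)
 *	}
 *	entry = NULL;
 * unlock:
 *	rcu_read_unlock();
 * )
 *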
*/ if (tt_global_entry) { - /* This node is probably going to update its tt table */ - tt_global_entry->orig_node->tt_poss_change = true; - /* The global entry has to be marked as ROAMING and has to be - * kept for consistency purpose */ + /* These nodes are probably going to update their tt table */ + head = &tt_global_entry->orig_list; + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + orig_entry->orig_node->tt_poss_change = true; + + send_roam_adv(bat_priv, tt_global_entry->common.addr, + orig_entry->orig_node); + } + rcu_read_unlock(); + /* The global entry has to be marked as ROAMING and + * has to be kept for consistency purpose + */ tt_global_entry->common.flags |= TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; - send_roam_adv(bat_priv, tt_global_entry->common.addr, - tt_global_entry->orig_node); } out: if (tt_local_entry) @@ -490,33 +518,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv) spin_unlock_bh(&bat_priv->tt_changes_list_lock); } +/* find out if an orig_node is already in the list of a tt_global_entry. + * returns 1 if found, 0 otherwise + */ +static bool tt_global_entry_has_orig(const struct tt_global_entry *entry, + const struct orig_node *orig_node) +{ + struct tt_orig_list_entry *tmp_orig_entry; + const struct hlist_head *head; + struct hlist_node *node; + bool found = false; + + rcu_read_lock(); + head = &entry->orig_list; + hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { + if (tmp_orig_entry->orig_node == orig_node) { + found = true; + break; + } + } + rcu_read_unlock(); + return found; +} + +static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + int ttvn) +{ + struct tt_orig_list_entry *orig_entry; + + orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); + if (!orig_entry) + return; + + INIT_HLIST_NODE(&orig_entry->list); + atomic_inc(&orig_node->refcount); + atomic_inc(&orig_node->tt_size); + orig_entry->orig_node = orig_node; + orig_entry->ttvn = ttvn; + + spin_lock_bh(&tt_global_entry->list_lock); + hlist_add_head_rcu(&orig_entry->list, + &tt_global_entry->orig_list); + spin_unlock_bh(&tt_global_entry->list_lock); +} + /* caller must hold orig_node refcount */ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *tt_addr, uint8_t ttvn, bool roaming, bool wifi) { - struct tt_global_entry *tt_global_entry; - struct orig_node *orig_node_tmp; + struct tt_global_entry *tt_global_entry = NULL; int ret = 0; int hash_added; tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); if (!tt_global_entry) { - tt_global_entry = - kmalloc(sizeof(*tt_global_entry), - GFP_ATOMIC); + tt_global_entry = kzalloc(sizeof(*tt_global_entry), + GFP_ATOMIC); if (!tt_global_entry) goto out; memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); + tt_global_entry->common.flags = NO_FLAGS; - atomic_set(&tt_global_entry->common.refcount, 2); - /* Assign the new orig_node */ - atomic_inc(&orig_node->refcount); - tt_global_entry->orig_node = orig_node; - tt_global_entry->ttvn = ttvn; tt_global_entry->roam_at = 0; + atomic_set(&tt_global_entry->common.refcount, 2); + + INIT_HLIST_HEAD(&tt_global_entry->orig_list); + spin_lock_init(&tt_global_entry->list_lock); hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, choose_orig, &tt_global_entry->common, @@ -527,19 +598,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, tt_global_entry_free_ref(tt_global_entry); goto out_remove; } - 
atomic_inc(&orig_node->tt_size); + + tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn); } else { - if (tt_global_entry->orig_node != orig_node) { - atomic_dec(&tt_global_entry->orig_node->tt_size); - orig_node_tmp = tt_global_entry->orig_node; - atomic_inc(&orig_node->refcount); - tt_global_entry->orig_node = orig_node; - orig_node_free_ref(orig_node_tmp); - atomic_inc(&orig_node->tt_size); + /* there is already a global entry, use this one. */ + + /* If there is the TT_CLIENT_ROAM flag set, there is only one + * originator left in the list and we previously received a + * delete + roaming change for this originator. + * + * We should first delete the old originator before adding the + * new one. + */ + if (tt_global_entry->common.flags & TT_CLIENT_ROAM) { + tt_global_del_orig_list(tt_global_entry); + tt_global_entry->common.flags &= ~TT_CLIENT_ROAM; + tt_global_entry->roam_at = 0; } - tt_global_entry->common.flags = NO_FLAGS; - tt_global_entry->ttvn = ttvn; - tt_global_entry->roam_at = 0; + + if (!tt_global_entry_has_orig(tt_global_entry, orig_node)) + tt_global_add_orig_entry(tt_global_entry, orig_node, + ttvn); } if (wifi) @@ -560,6 +639,34 @@ out: return ret; } +/* print all orig nodes who announce the address for this global entry. + * it is assumed that the caller holds rcu_read_lock(); + */ +static void tt_global_print_entry(struct tt_global_entry *tt_global_entry, + struct seq_file *seq) +{ + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + struct tt_common_entry *tt_common_entry; + uint16_t flags; + uint8_t last_ttvn; + + tt_common_entry = &tt_global_entry->common; + + head = &tt_global_entry->orig_list; + + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + flags = tt_common_entry->flags; + last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); + seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n", + tt_global_entry->common.addr, orig_entry->ttvn, + orig_entry->orig_node->orig, last_ttvn, + (flags & TT_CLIENT_ROAM ? 'R' : '.'), + (flags & TT_CLIENT_WIFI ? 'W' : '.')); + } +} + int tt_global_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; @@ -603,18 +710,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - seq_printf(seq, - " * %pM (%3u) via %pM (%3u) [%c%c]\n", - tt_global_entry->common.addr, - tt_global_entry->ttvn, - tt_global_entry->orig_node->orig, - (uint8_t) atomic_read( - &tt_global_entry->orig_node-> - last_ttvn), - (tt_global_entry->common.flags & - TT_CLIENT_ROAM ? 'R' : '.'), - (tt_global_entry->common.flags & - TT_CLIENT_WIFI ? 
'W' : '.')); + tt_global_print_entry(tt_global_entry, seq); } rcu_read_unlock(); } @@ -624,59 +720,150 @@ out: return ret; } -static void _tt_global_del(struct bat_priv *bat_priv, - struct tt_global_entry *tt_global_entry, - const char *message) +/* deletes the orig list of a tt_global_entry */ +static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry) { - if (!tt_global_entry) - goto out; + struct hlist_head *head; + struct hlist_node *node, *safe; + struct tt_orig_list_entry *orig_entry; - bat_dbg(DBG_TT, bat_priv, - "Deleting global tt entry %pM (via %pM): %s\n", - tt_global_entry->common.addr, tt_global_entry->orig_node->orig, - message); + spin_lock_bh(&tt_global_entry->list_lock); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { + hlist_del_rcu(node); + tt_orig_list_entry_free_ref(orig_entry); + } + spin_unlock_bh(&tt_global_entry->list_lock); + +} - atomic_dec(&tt_global_entry->orig_node->tt_size); +static void tt_global_del_orig_entry(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + const char *message) +{ + struct hlist_head *head; + struct hlist_node *node, *safe; + struct tt_orig_list_entry *orig_entry; + + spin_lock_bh(&tt_global_entry->list_lock); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { + if (orig_entry->orig_node == orig_node) { + bat_dbg(DBG_TT, bat_priv, + "Deleting %pM from global tt entry %pM: %s\n", + orig_node->orig, tt_global_entry->common.addr, + message); + hlist_del_rcu(node); + tt_orig_list_entry_free_ref(orig_entry); + } + } + spin_unlock_bh(&tt_global_entry->list_lock); +} + +static void tt_global_del_struct(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + const char *message) +{ + bat_dbg(DBG_TT, bat_priv, + "Deleting global tt entry %pM: %s\n", + tt_global_entry->common.addr, message); hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, tt_global_entry->common.addr); -out: - if (tt_global_entry) - tt_global_entry_free_ref(tt_global_entry); + tt_global_entry_free_ref(tt_global_entry); + +} + +/* If the client is to be deleted, we check if it is the last originator entry + * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer, + * otherwise we simply remove the originator scheduled for deletion. + */ +static void tt_global_del_roaming(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + const char *message) +{ + bool last_entry = true; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + + /* no local entry exists, case 1: + * Check if this is the last one or if other entries exist. + */ + + rcu_read_lock(); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + if (orig_entry->orig_node != orig_node) { + last_entry = false; + break; + } + } + rcu_read_unlock(); + + if (last_entry) { + /* it's the last one, mark for roaming. */ + tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + } else + /* there is another entry, we can simply delete this + * one and can still use the other one. 
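 *
 * ((ed.) For context: an entry marked TT_CLIENT_ROAM above is not kept
 * forever. tt_global_roam_purge() (see the hunk further down) drops it
 * once the roaming grace period has expired. A hedged sketch of that
 * check, assuming a timeout constant along the lines of
 * TT_CLIENT_ROAM_TIMEOUT:
 *
 *	if (!(tt_common_entry->flags & TT_CLIENT_ROAM))
 *		continue;	(only roamed entries time out here)
 *	if (!time_after(jiffies, tt_global_entry->roam_at +
 *			TT_CLIENT_ROAM_TIMEOUT))
 *		continue;	(still within the grace period)
 *	hlist_del_rcu(node);
 *	tt_global_entry_free_ref(tt_global_entry);
 * )
 *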
+ */ + tt_global_del_orig_entry(bat_priv, tt_global_entry, + orig_node, message); } -void tt_global_del(struct bat_priv *bat_priv, - struct orig_node *orig_node, const unsigned char *addr, - const char *message, bool roaming) + + +static void tt_global_del(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *addr, + const char *message, bool roaming) { struct tt_global_entry *tt_global_entry = NULL; struct tt_local_entry *tt_local_entry = NULL; tt_global_entry = tt_global_hash_find(bat_priv, addr); - if (!tt_global_entry || tt_global_entry->orig_node != orig_node) + if (!tt_global_entry) goto out; - if (!roaming) - goto out_del; + if (!roaming) { + tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node, + message); + + if (hlist_empty(&tt_global_entry->orig_list)) + tt_global_del_struct(bat_priv, tt_global_entry, + message); + + goto out; + } /* if we are deleting a global entry due to a roam * event, there are two possibilities: - * 1) the client roamed from node A to node B => we mark + * 1) the client roamed from node A to node B => if there + * is only one originator left for this client, we mark * it with TT_CLIENT_ROAM, we start a timer and we * wait for node B to claim it. In case of timeout * the entry is purged. + * + * If there are other originators left, we directly delete + * the originator. * 2) the client roamed to us => we can directly delete * the global entry, since it is useless now. */ + tt_local_entry = tt_local_hash_find(bat_priv, tt_global_entry->common.addr); - if (!tt_local_entry) { - tt_global_entry->common.flags |= TT_CLIENT_ROAM; - tt_global_entry->roam_at = jiffies; - goto out; - } + if (tt_local_entry) { + /* local entry exists, case 2: client roamed to us. */ + tt_global_del_orig_list(tt_global_entry); + tt_global_del_struct(bat_priv, tt_global_entry, message); + } else + /* no local entry exists, case 1: check for roaming */ + tt_global_del_roaming(bat_priv, tt_global_entry, orig_node, + message); -out_del: - _tt_global_del(bat_priv, tt_global_entry, message); out: if (tt_global_entry) @@ -709,11 +896,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv, tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (tt_global_entry->orig_node == orig_node) { + + tt_global_del_orig_entry(bat_priv, tt_global_entry, + orig_node, message); + + if (hlist_empty(&tt_global_entry->orig_list)) { bat_dbg(DBG_TT, bat_priv, - "Deleting global tt entry %pM (via %pM): %s\n", + "Deleting global tt entry %pM: %s\n", tt_global_entry->common.addr, - tt_global_entry->orig_node->orig, message); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); @@ -721,7 +911,6 @@ void tt_global_del_orig(struct bat_priv *bat_priv, } spin_unlock_bh(list_lock); } - atomic_set(&orig_node->tt_size, 0); orig_node->tt_initialised = false; } @@ -754,7 +943,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv) bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry (%pM): Roaming timeout\n", tt_global_entry->common.addr); - atomic_dec(&tt_global_entry->orig_node->tt_size); + hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); } @@ -817,6 +1006,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; struct orig_node *orig_node = NULL; + struct neigh_node *router = NULL; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + int best_tq; if (src && 
atomic_read(&bat_priv->ap_isolation)) { tt_local_entry = tt_local_hash_find(bat_priv, src); @@ -833,11 +1027,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) goto out; - if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) - goto out; + best_tq = 0; - orig_node = tt_global_entry->orig_node; + rcu_read_lock(); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + router = orig_node_get_router(orig_entry->orig_node); + if (!router) + continue; + if (router->tq_avg > best_tq) { + orig_node = orig_entry->orig_node; + best_tq = router->tq_avg; + } + neigh_node_free_ref(router); + } + /* found anything? */ + if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) + orig_node = NULL; + rcu_read_unlock(); out: if (tt_global_entry) tt_global_entry_free_ref(tt_global_entry); @@ -848,7 +1056,8 @@ out: } /* Calculates the checksum of the local table of a given orig_node */ -uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) +static uint16_t tt_global_crc(struct bat_priv *bat_priv, + struct orig_node *orig_node) { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_global_hash; @@ -868,20 +1077,26 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (compare_eth(tt_global_entry->orig_node, - orig_node)) { - /* Roaming clients are in the global table for - * consistency only. They don't have to be - * taken into account while computing the - * global crc */ - if (tt_common_entry->flags & TT_CLIENT_ROAM) - continue; - total_one = 0; - for (j = 0; j < ETH_ALEN; j++) - total_one = crc16_byte(total_one, - tt_common_entry->addr[j]); - total ^= total_one; - } + /* Roaming clients are in the global table for + * consistency only. 
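 *
 * ((ed.) Aside on the checksum computed below: every announced client
 * address is hashed to a 16-bit CRC, and the per-client CRCs are
 * XOR-folded into one table checksum. XOR is commutative, so the
 * result is independent of hash iteration order and two nodes can
 * compare their checksums directly. A self-contained sketch using
 * crc16_byte() from <linux/crc16.h>; addr stands in for one announced
 * client address:
 *
 *	uint16_t total = 0, total_one;
 *	int j;
 *	(for each announced client address addr[ETH_ALEN]:)
 *		total_one = 0;
 *		for (j = 0; j < ETH_ALEN; j++)
 *			total_one = crc16_byte(total_one, addr[j]);
 *		total ^= total_one;
 * )
 *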
They don't have to be + * taken into account while computing the + * global crc + */ + if (tt_global_entry->common.flags & TT_CLIENT_ROAM) + continue; + + /* find out if this global entry is announced by this + * originator + */ + if (!tt_global_entry_has_orig(tt_global_entry, + orig_node)) + continue; + + total_one = 0; + for (j = 0; j < ETH_ALEN; j++) + total_one = crc16_byte(total_one, + tt_global_entry->common.addr[j]); + total ^= total_one; } rcu_read_unlock(); } @@ -936,8 +1151,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv) spin_unlock_bh(&bat_priv->tt_req_list_lock); } -void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes) +static void tt_save_orig_buffer(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *tt_buff, + uint8_t tt_num_changes) { uint16_t tt_buff_len = tt_len(tt_num_changes); @@ -1020,7 +1237,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - return (tt_global_entry->orig_node == orig_node); + return tt_global_entry_has_orig(tt_global_entry, orig_node); } static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, @@ -1124,7 +1341,7 @@ static int send_tt_request(struct bat_priv *bat_priv, memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); tt_request->header.ttl = TTL; tt_request->ttvn = ttvn; - tt_request->tt_data = tt_crc; + tt_request->tt_data = htons(tt_crc); tt_request->flags = TT_REQUEST; if (full_table) @@ -1401,10 +1618,15 @@ out: bool send_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_request) { - if (is_my_mac(tt_request->dst)) + if (is_my_mac(tt_request->dst)) { + /* don't answer backbone gws! */ + if (bla_is_backbone_gw_orig(bat_priv, tt_request->src)) + return true; + return send_my_tt_response(bat_priv, tt_request); - else + } else { return send_other_tt_response(bat_priv, tt_request); + } } static void _tt_update_changes(struct bat_priv *bat_priv, @@ -1508,6 +1730,10 @@ void handle_tt_response(struct bat_priv *bat_priv, tt_response->src, tt_response->ttvn, tt_response->tt_data, (tt_response->flags & TT_FULL_TABLE ? 
'F' : '.')); + /* we should have never asked a backbone gw */ + if (bla_is_backbone_gw_orig(bat_priv, tt_response->src)) + goto out; + orig_node = orig_hash_find(bat_priv, tt_response->src); if (!orig_node) goto out; @@ -1627,8 +1853,8 @@ unlock: return ret; } -void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, - struct orig_node *orig_node) +static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node) { struct neigh_node *neigh_node = NULL; struct sk_buff *skb = NULL; @@ -1796,6 +2022,8 @@ void tt_commit_changes(struct bat_priv *bat_priv) /* Increment the TTVN only once per OGM interval */ atomic_inc(&bat_priv->ttvn); + bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", + (uint8_t)atomic_read(&bat_priv->ttvn)); bat_priv->tt_poss_change = false; } @@ -1803,10 +2031,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) { struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; - bool ret = true; + bool ret = false; if (!atomic_read(&bat_priv->ap_isolation)) - return false; + goto out; tt_local_entry = tt_local_hash_find(bat_priv, dst); if (!tt_local_entry) @@ -1816,10 +2044,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) if (!tt_global_entry) goto out; - if (_is_ap_isolated(tt_local_entry, tt_global_entry)) + if (!_is_ap_isolated(tt_local_entry, tt_global_entry)) goto out; - ret = false; + ret = true; out: if (tt_global_entry) @@ -1836,6 +2064,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); bool full_table = true; + /* don't care about a backbone gateway's updates. */ + if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + return; + /* orig table not initialised AND first diff is in the OGM OR the ttvn * increased by one -> we can apply the attached changes */ if ((!orig_node->tt_initialised && ttvn == 1) || @@ -1873,6 +2105,7 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, } else { /* if we missed more than one change or our tables are not * in sync anymore -> request fresh tt data */ + if (!orig_node->tt_initialised || ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { request_table: @@ -1886,3 +2119,22 @@ request_table: } } } + +/* returns true if we know that the client has moved from its old + * originator to another one. The entry is still kept for consistency + * purposes + */ +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) +{ + struct tt_global_entry *tt_global_entry; + bool ret = false; + + tt_global_entry = tt_global_hash_find(bat_priv, addr); + if (!tt_global_entry) + goto out; + + ret = tt_global_entry->common.flags & TT_CLIENT_ROAM; + tt_global_entry_free_ref(tt_global_entry); +out: + return ret; +} diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index c753633b1da..c43374dc364 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -1,7 +1,7 @@ /* * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * - * Marek Lindner, Simon Wunderlich + * Marek Lindner, Simon Wunderlich, Antonio Quartulli * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -39,27 +39,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, int tt_global_seq_print_text(struct seq_file *seq, void *offset); void tt_global_del_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const char *message); -void tt_global_del(struct bat_priv *bat_priv, - struct orig_node *orig_node, const unsigned char *addr, - const char *message, bool roaming); struct orig_node *transtable_search(struct bat_priv *bat_priv, const uint8_t *src, const uint8_t *addr); -void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes); uint16_t tt_local_crc(struct bat_priv *bat_priv); -uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); void tt_free(struct bat_priv *bat_priv); bool send_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_request); bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); void handle_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_response); -void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, - struct orig_node *orig_node); void tt_commit_changes(struct bat_priv *bat_priv); bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *tt_buff, uint8_t tt_num_changes, uint8_t ttvn, uint16_t tt_crc); +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr); + #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 302efb52347..61308e8016f 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -27,7 +27,7 @@ #include "packet.h" #include "bitarray.h" -#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \ +#define BAT_HEADER_LEN (ETH_HLEN + \ ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? 
\ sizeof(struct unicast_packet) : \ sizeof(struct bcast_packet)))) @@ -52,7 +52,7 @@ struct hard_iface { /** * orig_node - structure for orig_list maintaining nodes of mesh * @primary_addr: hosts primary interface address - * @last_valid: when last packet from this node was received + * @last_seen: when last packet from this node was received * @bcast_seqno_reset: time when the broadcast seqno window was reset * @batman_seqno_reset: time when the batman seqno window was reset * @gw_flags: flags related to gateway class @@ -70,7 +70,7 @@ struct orig_node { struct neigh_node __rcu *router; /* rcu protected pointer */ unsigned long *bcast_own; uint8_t *bcast_own_sum; - unsigned long last_valid; + unsigned long last_seen; unsigned long bcast_seqno_reset; unsigned long batman_seqno_reset; uint8_t gw_flags; @@ -90,7 +90,7 @@ struct orig_node { bool tt_poss_change; uint32_t last_real_seqno; uint8_t last_ttl; - unsigned long bcast_bits[NUM_WORDS]; + DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE); uint32_t last_bcast_seqno; struct hlist_head neigh_list; struct list_head frag_list; @@ -120,7 +120,7 @@ struct gw_node { /** * neigh_node - * @last_valid: when last packet via this neighbor was received + * @last_seen: when last packet via this neighbor was received */ struct neigh_node { struct hlist_node list; @@ -131,15 +131,22 @@ struct neigh_node { uint8_t tq_avg; uint8_t last_ttl; struct list_head bonding_list; - unsigned long last_valid; - unsigned long real_bits[NUM_WORDS]; + unsigned long last_seen; + DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE); atomic_t refcount; struct rcu_head rcu; struct orig_node *orig_node; struct hard_iface *if_incoming; - spinlock_t tq_lock; /* protects: tq_recv, tq_index */ + spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */ }; +#ifdef CONFIG_BATMAN_ADV_BLA +struct bcast_duplist_entry { + uint8_t orig[ETH_ALEN]; + uint16_t crc; + unsigned long entrytime; +}; +#endif struct bat_priv { atomic_t mesh_state; @@ -148,6 +155,7 @@ struct bat_priv { atomic_t bonding; /* boolean */ atomic_t fragmentation; /* boolean */ atomic_t ap_isolation; /* boolean */ + atomic_t bridge_loop_avoidance; /* boolean */ atomic_t vis_mode; /* VIS_TYPE_* */ atomic_t gw_mode; /* GW_MODE_* */ atomic_t gw_sel_class; /* uint */ @@ -161,6 +169,7 @@ struct bat_priv { atomic_t ttvn; /* translation table version number */ atomic_t tt_ogm_append_cnt; atomic_t tt_local_changes; /* changes registered in a OGM interval */ + atomic_t bla_num_requests; /* number of bla requests in flight */ /* The tt_poss_change flag is used to detect an ongoing roaming phase. 
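 *
 * ((ed.) On the DECLARE_BITMAP conversions above: the macro is plain
 * kernel C, roughly
 *
 *	#define DECLARE_BITMAP(name, bits) \
 *		unsigned long name[BITS_TO_LONGS(bits)]
 *
 * so bcast_bits and real_bits keep their unsigned-long storage but are
 * now sized from TQ_LOCAL_WINDOW_SIZE directly instead of via a
 * hand-maintained NUM_WORDS, and still work with set_bit(), test_bit()
 * and friends.)
 *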
* If true, then I received a Roaming_adv and I have to inspect every * packet directed to me to check whether I am still the true @@ -174,15 +183,23 @@ struct bat_priv { struct hlist_head forw_bat_list; struct hlist_head forw_bcast_list; struct hlist_head gw_list; - struct hlist_head softif_neigh_vids; struct list_head tt_changes_list; /* tracks changes in a OGM int */ struct list_head vis_send_list; struct hashtable_t *orig_hash; struct hashtable_t *tt_local_hash; struct hashtable_t *tt_global_hash; +#ifdef CONFIG_BATMAN_ADV_BLA + struct hashtable_t *claim_hash; + struct hashtable_t *backbone_hash; +#endif struct list_head tt_req_list; /* list of pending tt_requests */ struct list_head tt_roam_list; struct hashtable_t *vis_hash; +#ifdef CONFIG_BATMAN_ADV_BLA + struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; + int bcast_duplist_curr; + struct bla_claim_dst claim_dest; +#endif spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects */ spinlock_t tt_changes_list_lock; /* protects tt_changes */ @@ -191,8 +208,6 @@ struct bat_priv { spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ spinlock_t vis_hash_lock; /* protects vis_hash */ spinlock_t vis_list_lock; /* protects vis_info::recv_list */ - spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ - spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ atomic_t num_local_tt; /* Checksum of the local table, recomputed before sending a new OGM */ atomic_t tt_crc; @@ -202,6 +217,7 @@ struct bat_priv { struct delayed_work tt_work; struct delayed_work orig_work; struct delayed_work vis_work; + struct delayed_work bla_work; struct gw_node __rcu *curr_gw; /* rcu protected pointer */ atomic_t gw_reselect; struct hard_iface __rcu *primary_if; /* rcu protected pointer */ @@ -239,10 +255,41 @@ struct tt_local_entry { struct tt_global_entry { struct tt_common_entry common; + struct hlist_head orig_list; + spinlock_t list_lock; /* protects the list */ + unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ +}; + +struct tt_orig_list_entry { struct orig_node *orig_node; uint8_t ttvn; - unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ + struct rcu_head rcu; + struct hlist_node list; +}; + +#ifdef CONFIG_BATMAN_ADV_BLA +struct backbone_gw { + uint8_t orig[ETH_ALEN]; + short vid; /* used VLAN ID */ + struct hlist_node hash_entry; + struct bat_priv *bat_priv; + unsigned long lasttime; /* last time we heard of this backbone gw */ + atomic_t request_sent; + atomic_t refcount; + struct rcu_head rcu; + uint16_t crc; /* crc checksum over all claims */ +}; + +struct claim { + uint8_t addr[ETH_ALEN]; + short vid; + struct backbone_gw *backbone_gw; + unsigned long lasttime; /* last time we heard of claim (locals only) */ + struct rcu_head rcu; + atomic_t refcount; + struct hlist_node hash_entry; }; +#endif struct tt_change_node { struct list_head list; @@ -327,41 +374,24 @@ struct recvlist_node { uint8_t mac[ETH_ALEN]; }; -struct softif_neigh_vid { - struct hlist_node list; - struct bat_priv *bat_priv; - short vid; - atomic_t refcount; - struct softif_neigh __rcu *softif_neigh; - struct rcu_head rcu; - struct hlist_head softif_neigh_list; -}; - -struct softif_neigh { - struct hlist_node list; - uint8_t addr[ETH_ALEN]; - unsigned long last_seen; - atomic_t refcount; - struct rcu_head rcu; -}; - struct bat_algo_ops { struct hlist_node list; char *name; - /* init OGM when hard-interface is enabled */ - void (*bat_ogm_init)(struct hard_iface 
*hard_iface); - /* init primary OGM when primary interface is selected */ - void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); - /* init mac addresses of the OGM belonging to this hard-interface */ - void (*bat_ogm_update_mac)(struct hard_iface *hard_iface); + /* init routing info when hard-interface is enabled */ + int (*bat_iface_enable)(struct hard_iface *hard_iface); + /* de-init routing info when hard-interface is disabled */ + void (*bat_iface_disable)(struct hard_iface *hard_iface); + /* (re-)init mac addresses of the protocol information + * belonging to this hard-interface + */ + void (*bat_iface_update_mac)(struct hard_iface *hard_iface); + /* called when primary interface is selected / changed */ + void (*bat_primary_iface_set)(struct hard_iface *hard_iface); /* prepare a new outgoing OGM for the send queue */ void (*bat_ogm_schedule)(struct hard_iface *hard_iface, int tt_num_changes); /* send scheduled OGM */ void (*bat_ogm_emit)(struct forw_packet *forw_packet); - /* receive incoming OGM */ - void (*bat_ogm_receive)(struct hard_iface *if_incoming, - struct sk_buff *skb); }; #endif /* _NET_BATMAN_ADV_TYPES_H_ */ diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index 676f6a626b2..74175c21085 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -331,6 +331,14 @@ find_router: unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); + /* inform the destination node that we are still missing a correct route + * for this client. The destination will receive this packet and will + * try to reroute it because the ttvn contained in the header is less + * than the current one + */ + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + unicast_packet->ttvn = unicast_packet->ttvn - 1; + if (atomic_read(&bat_priv->fragmentation) && data_len + sizeof(*unicast_packet) > neigh_node->if_incoming->net_dev->mtu) { diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c4a5b8cafad..cec216fb77c 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -434,12 +434,12 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, return NULL; info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + - sizeof(struct ethhdr)); + ETH_HLEN); if (!info->skb_packet) { kfree(info); return NULL; } - skb_reserve(info->skb_packet, sizeof(struct ethhdr)); + skb_reserve(info->skb_packet, ETH_HLEN); packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) + vis_info_len); @@ -894,11 +894,11 @@ int vis_init(struct bat_priv *bat_priv) bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + MAX_VIS_PACKET_SIZE + - sizeof(struct ethhdr)); + ETH_HLEN); if (!bat_priv->my_vis_info->skb_packet) goto free_info; - skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); + skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, sizeof(*packet)); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 6fb68a9743a..3e18af4dadc 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) } if (sk->sk_state == BT_CONNECTED || !newsock || - bt_sk(parent)->defer_setup) { + test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { bt_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); @@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent) list_for_each_safe(p, n, 
&bt_sk(parent)->accept_q) { sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); if (sk->sk_state == BT_CONNECTED || - (bt_sk(parent)->defer_setup && - sk->sk_state == BT_CONNECT2)) + (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && + sk->sk_state == BT_CONNECT2)) return POLLIN | POLLRDNORM; } @@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa sk->sk_state == BT_CONFIG) return mask; - if (!bt_sk(sk)->suspended && sock_writeable(sk)) + if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index a779ec70332..031d7d65675 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -69,7 +69,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst) BT_DBG(""); list_for_each_entry(s, &bnep_session_list, list) - if (!compare_ether_addr(dst, s->eh.h_source)) + if (ether_addr_equal(dst, s->eh.h_source)) return s; return NULL; @@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) } /* Strip 802.1p header */ - if (ntohs(s->eh.h_proto) == 0x8100) { + if (ntohs(s->eh.h_proto) == ETH_P_8021Q) { if (!skb_pull(skb, 4)) goto badframe; s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); @@ -422,10 +422,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) iv[il++] = (struct kvec) { &type, 1 }; len++; - if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source)) + if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source)) type |= 0x01; - if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest)) + if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest)) type |= 0x02; if (type) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5238b6b3ea6..3f18a6ed973 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], } EXPORT_SYMBOL(hci_le_start_enc); -void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]) -{ - struct hci_dev *hdev = conn->hdev; - struct hci_cp_le_ltk_reply cp; - - BT_DBG("%p", conn); - - memset(&cp, 0, sizeof(cp)); - - cp.handle = cpu_to_le16(conn->handle); - memcpy(cp.ltk, ltk, sizeof(ltk)); - - hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); -} -EXPORT_SYMBOL(hci_le_ltk_reply); - -void hci_le_ltk_neg_reply(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - struct hci_cp_le_ltk_neg_reply cp; - - BT_DBG("%p", conn); - - memset(&cp, 0, sizeof(cp)); - - cp.handle = cpu_to_le16(conn->handle); - - hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp); -} - /* Device _must_ be locked */ void hci_sco_setup(struct hci_conn *conn, __u8 status) { @@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route); /* Create SCO, ACL or LE connection. 
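 *
 * ((ed.) The signature below gains a dst_type argument, so an LE
 * caller now passes the address type in directly instead of relying on
 * the removed advertising cache. A hedged sketch of a caller -- addr
 * and addr_type stand in for values taken from a management command,
 * and the security constants are only plausible choices:
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_connect(hdev, LE_LINK, &addr, addr_type,
 *			   BT_SECURITY_MEDIUM, HCI_AT_NO_BONDING);
 *	hci_dev_unlock(hdev);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 * )
 *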
* Device _must_ be locked */ -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) +struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u8 dst_type, __u8 sec_level, __u8 auth_type) { struct hci_conn *acl; struct hci_conn *sco; @@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 BT_DBG("%s dst %s", hdev->name, batostr(dst)); if (type == LE_LINK) { - struct adv_entry *entry; - le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); - if (le) - return ERR_PTR(-EBUSY); - - entry = hci_find_adv_entry(hdev, dst); - if (!entry) - return ERR_PTR(-EHOSTUNREACH); + if (!le) { + le = hci_conn_add(hdev, LE_LINK, dst); + if (!le) + return ERR_PTR(-ENOMEM); - le = hci_conn_add(hdev, LE_LINK, dst); - if (!le) - return ERR_PTR(-ENOMEM); - - le->dst_type = entry->bdaddr_type; + le->dst_type = bdaddr_to_le(dst_type); + hci_le_connect(le); + } - hci_le_connect(le); + le->pending_sec_level = sec_level; + le->auth_type = auth_type; hci_conn_hold(le); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index d6dc44cd15b..411ace8e647 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) */ if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) { struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; + u16 opcode = __le16_to_cpu(sent->opcode); struct sk_buff *skb; /* Some CSR based controllers generate a spontaneous @@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) * command. */ - if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET) + if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET) return; skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC); @@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev) /* Read Local Version */ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); + + /* Read Local AMP Info */ + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); } static void hci_init_req(struct hci_dev *hdev, unsigned long opt) @@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) case DISCOVERY_STOPPED: if (hdev->discovery.state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); - hdev->discovery.type = 0; break; case DISCOVERY_STARTING: break; @@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = { .set_block = hci_rfkill_set_block, }; -/* Alloc HCI device */ -struct hci_dev *hci_alloc_dev(void) -{ - struct hci_dev *hdev; - - hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); - if (!hdev) - return NULL; - - hci_init_sysfs(hdev); - skb_queue_head_init(&hdev->driver_init); - - return hdev; -} -EXPORT_SYMBOL(hci_alloc_dev); - -/* Free HCI device */ -void hci_free_dev(struct hci_dev *hdev) -{ - skb_queue_purge(&hdev->driver_init); - - /* will free via device release */ - put_device(&hdev->dev); -} -EXPORT_SYMBOL(hci_free_dev); - static void hci_power_on(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); @@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, } int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, - int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16 + int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8]) { struct smp_ltk *key, *old_key; @@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, 
bdaddr_t *bdaddr, u8 type) return mgmt_device_unblocked(hdev, bdaddr, type); } -static void hci_clear_adv_cache(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - adv_work.work); - - hci_dev_lock(hdev); - - hci_adv_entries_clear(hdev); - - hci_dev_unlock(hdev); -} - -int hci_adv_entries_clear(struct hci_dev *hdev) -{ - struct adv_entry *entry, *tmp; - - list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) { - list_del(&entry->list); - kfree(entry); - } - - BT_DBG("%s adv cache cleared", hdev->name); - - return 0; -} - -struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr) -{ - struct adv_entry *entry; - - list_for_each_entry(entry, &hdev->adv_entries, list) - if (bacmp(bdaddr, &entry->bdaddr) == 0) - return entry; - - return NULL; -} - -static inline int is_connectable_adv(u8 evt_type) -{ - if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND) - return 1; - - return 0; -} - -int hci_add_adv_entry(struct hci_dev *hdev, - struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type)) - return -EINVAL; - - /* Only new entries should be added to adv_entries. So, if - * bdaddr was found, don't add it. */ - if (hci_find_adv_entry(hdev, &ev->bdaddr)) - return 0; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - bacpy(&entry->bdaddr, &ev->bdaddr); - entry->bdaddr_type = ev->bdaddr_type; - - list_add(&entry->list, &hdev->adv_entries); - - BT_DBG("%s adv entry added: address %s type %u", hdev->name, - batostr(&entry->bdaddr), entry->bdaddr_type); - - return 0; -} - static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt) { struct le_scan_params *param = (struct le_scan_params *) opt; @@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, return 0; } +int hci_cancel_le_scan(struct hci_dev *hdev) +{ + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + return -EALREADY; + + if (cancel_delayed_work(&hdev->le_scan_disable)) { + struct hci_cp_le_set_scan_enable cp; + + /* Send HCI command to disable LE Scan */ + memset(&cp, 0, sizeof(cp)); + hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + } + + return 0; +} + static void le_scan_disable_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, @@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window, return 0; } -/* Register HCI device */ -int hci_register_dev(struct hci_dev *hdev) +/* Alloc HCI device */ +struct hci_dev *hci_alloc_dev(void) { - struct list_head *head = &hci_dev_list, *p; - int i, id, error; - - BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - - if (!hdev->open || !hdev->close) - return -EINVAL; - - /* Do not allow HCI_AMP devices to register at index 0, - * so the index can be used as the AMP controller ID. - */ - id = (hdev->dev_type == HCI_BREDR) ? 
0 : 1; - - write_lock(&hci_dev_list_lock); - - /* Find first available device id */ - list_for_each(p, &hci_dev_list) { - if (list_entry(p, struct hci_dev, list)->id != id) - break; - head = p; id++; - } - - sprintf(hdev->name, "hci%d", id); - hdev->id = id; - list_add_tail(&hdev->list, head); + struct hci_dev *hdev; - mutex_init(&hdev->lock); + hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); + if (!hdev) + return NULL; - hdev->flags = 0; - hdev->dev_flags = 0; hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); hdev->io_capability = 0x03; /* No Input No Output */ - hdev->idle_timeout = 0; hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; + mutex_init(&hdev->lock); + mutex_init(&hdev->req_lock); + + INIT_LIST_HEAD(&hdev->mgmt_pending); + INIT_LIST_HEAD(&hdev->blacklist); + INIT_LIST_HEAD(&hdev->uuids); + INIT_LIST_HEAD(&hdev->link_keys); + INIT_LIST_HEAD(&hdev->long_term_keys); + INIT_LIST_HEAD(&hdev->remote_oob_data); + INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); INIT_WORK(&hdev->tx_work, hci_tx_work); + INIT_WORK(&hdev->power_on, hci_power_on); + INIT_WORK(&hdev->le_scan, le_scan_work); + INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); + INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); + INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); + skb_queue_head_init(&hdev->driver_init); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); skb_queue_head_init(&hdev->raw_q); - setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev); - - for (i = 0; i < NUM_REASSEMBLY; i++) - hdev->reassembly[i] = NULL; - init_waitqueue_head(&hdev->req_wait_q); - mutex_init(&hdev->req_lock); - discovery_init(hdev); + setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev); + hci_init_sysfs(hdev); + discovery_init(hdev); hci_conn_hash_init(hdev); - INIT_LIST_HEAD(&hdev->mgmt_pending); - - INIT_LIST_HEAD(&hdev->blacklist); + return hdev; +} +EXPORT_SYMBOL(hci_alloc_dev); - INIT_LIST_HEAD(&hdev->uuids); +/* Free HCI device */ +void hci_free_dev(struct hci_dev *hdev) +{ + skb_queue_purge(&hdev->driver_init); - INIT_LIST_HEAD(&hdev->link_keys); - INIT_LIST_HEAD(&hdev->long_term_keys); + /* will free via device release */ + put_device(&hdev->dev); +} +EXPORT_SYMBOL(hci_free_dev); - INIT_LIST_HEAD(&hdev->remote_oob_data); +/* Register HCI device */ +int hci_register_dev(struct hci_dev *hdev) +{ + struct list_head *head, *p; + int id, error; - INIT_LIST_HEAD(&hdev->adv_entries); + if (!hdev->open || !hdev->close) + return -EINVAL; - INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache); - INIT_WORK(&hdev->power_on, hci_power_on); - INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); + write_lock(&hci_dev_list_lock); - INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); + /* Do not allow HCI_AMP devices to register at index 0, + * so the index can be used as the AMP controller ID. + */ + id = (hdev->dev_type == HCI_BREDR) ? 
0 : 1; + head = &hci_dev_list; - memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); + /* Find first available device id */ + list_for_each(p, &hci_dev_list) { + int nid = list_entry(p, struct hci_dev, list)->id; + if (nid > id) + break; + if (nid == id) + id++; + head = p; + } - atomic_set(&hdev->promisc, 0); + sprintf(hdev->name, "hci%d", id); + hdev->id = id; - INIT_WORK(&hdev->le_scan, le_scan_work); + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); + list_add(&hdev->list, head); write_unlock(&hci_dev_list_lock); @@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_del_sysfs(hdev); - cancel_delayed_work_sync(&hdev->adv_work); - destroy_workqueue(hdev->workqueue); hci_dev_lock(hdev); @@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_link_keys_clear(hdev); hci_smp_ltks_clear(hdev); hci_remote_oob_data_clear(hdev); - hci_adv_entries_clear(hdev); hci_dev_unlock(hdev); hci_dev_put(hdev); @@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, struct hci_dev *hdev = conn->hdev; struct sk_buff *list; + skb->len = skb_headlen(skb); + skb->data_len = 0; + + bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + hci_add_acl_hdr(skb, conn->handle, flags); + list = skb_shinfo(skb)->frag_list; if (!list) { /* Non fragmented */ @@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); skb->dev = (void *) hdev; - bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; - hci_add_acl_hdr(skb, conn->handle, flags); hci_queue_acl(conn, &chan->data_q, skb, flags); @@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; - int num = 0, min = ~0; + unsigned int num = 0, min = ~0; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. 
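
The reworked hci_register_dev() above keeps hci_dev_list sorted by id and scans it for the first unused index at or above its starting point, which is 1 for AMP controllers so that index 0 stays reserved as the BR/EDR controller ID. A minimal userspace sketch of that first-fit scan, with a sorted array standing in for the kernel list:

#include <stdio.h>

/* Find the first free id >= base, given a sorted list of ids in use.
 * Mirrors the list_for_each() scan in hci_register_dev(): smaller ids
 * are skipped, an exact hit bumps the candidate, and the first gap
 * ends the search. */
static int first_free_id(const int *used, int n, int base)
{
        int id = base;
        int i;

        for (i = 0; i < n; i++) {
                if (used[i] > id)
                        break;          /* gap found: id is unused */
                if (used[i] == id)
                        id++;           /* taken: try the next index */
        }
        return id;
}

int main(void)
{
        int used[] = { 0, 1, 2, 4 };

        /* BR/EDR device (base 0): first gap is hci3 */
        printf("hci%d\n", first_free_id(used, 4, 0));
        /* AMP device (base 1): index 0 is never even considered */
        printf("hci%d\n", first_free_id(used, 4, 1));
        return 0;
}
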
*/ @@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_chan *chan = NULL; - int num = 0, min = ~0, cur_prio = 0; + unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; int cnt, q, conn_num = 0; @@ -2945,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev) BT_DBG("%s", hdev->name); if (!test_bit(HCI_INQUIRY, &hdev->flags)) - return -EPERM; + return -EALREADY; return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); } + +u8 bdaddr_to_le(u8 bdaddr_type) +{ + switch (bdaddr_type) { + case BDADDR_LE_PUBLIC: + return ADDR_LE_DEV_PUBLIC; + + default: + /* Fallback to LE Random address type */ + return ADDR_LE_DEV_RANDOM; + } +} diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 1266f78fa8e..94ad124a4ea 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_check_pending(hdev); } +static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); +} + static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; + clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); + hci_conn_check_pending(hdev); } @@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_RESET, status); /* Reset all non-persistent flags */ - hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS)); + hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) | + BIT(HCI_PERIODIC_INQ)); hdev->discovery.state = DISCOVERY_STOPPED; } @@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev) events[5] |= 0x10; /* Synchronous Connection Changed */ if (hdev->features[3] & LMP_RSSI_INQ) - events[4] |= 0x04; /* Inquiry Result with RSSI */ + events[4] |= 0x02; /* Inquiry Result with RSSI */ if (hdev->features[5] & LMP_SNIFF_SUBR) events[5] |= 0x20; /* Sniff Subrating */ @@ -615,6 +630,7 @@ done: static void hci_setup_link_policy(struct hci_dev *hdev) { + struct hci_cp_write_def_link_policy cp; u16 link_policy = 0; if (hdev->features[0] & LMP_RSWITCH) @@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev) if (hdev->features[1] & LMP_PARK) link_policy |= HCI_LP_PARK; - link_policy = cpu_to_le16(link_policy); - hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy), - &link_policy); + cp.policy = cpu_to_le16(link_policy); + hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); } static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) @@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev) memset(&cp, 0, sizeof(cp)); - if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { cp.le = 1; cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); } @@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, struct sk_buff *skb) { - __u8 status = *((__u8 *) skb->data); + struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; - 
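
The new bdaddr_to_le() helper above translates the mgmt-layer address-type constants into the HCI-layer LE address types, treating anything that is not explicitly public as random. A self-contained model of the mapping; the numeric constant values here are placeholders, the real ones live in the kernel's Bluetooth headers:

#include <stdio.h>

#define BDADDR_LE_PUBLIC    0x01    /* mgmt-facing address types */
#define BDADDR_LE_RANDOM    0x02

#define ADDR_LE_DEV_PUBLIC  0x00    /* HCI-facing address types */
#define ADDR_LE_DEV_RANDOM  0x01

/* Same shape as bdaddr_to_le(): anything not explicitly public
 * falls back to the random address type. */
static unsigned char to_le(unsigned char bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;
        default:
                return ADDR_LE_DEV_RANDOM;
        }
}

int main(void)
{
        printf("%u %u %u\n",
               to_le(BDADDR_LE_PUBLIC),     /* 0 */
               to_le(BDADDR_LE_RANDOM),     /* 1 */
               to_le(0x7f));                /* 1: unknown -> random */
        return 0;
}
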
BT_DBG("%s status 0x%x", hdev->name, status); + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (!rp->status) + hdev->inq_tx_power = rp->tx_power; - hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status); + hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status); } static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb) @@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, set_bit(HCI_LE_SCAN, &hdev->dev_flags); - cancel_delayed_work_sync(&hdev->adv_work); - hci_dev_lock(hdev); - hci_adv_entries_clear(hdev); hci_discovery_set_state(hdev, DISCOVERY_FINDING); hci_dev_unlock(hdev); break; case LE_SCANNING_DISABLED: - if (status) + if (status) { + hci_dev_lock(hdev); + mgmt_stop_discovery_failed(hdev, status); + hci_dev_unlock(hdev); return; + } clear_bit(HCI_LE_SCAN, &hdev->dev_flags); - schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT); - - if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) { + if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && + hdev->discovery.state == DISCOVERY_FINDING) { mgmt_interleaved_discovery(hdev); } else { hci_dev_lock(hdev); @@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) if (status) { if (conn && conn->state == BT_CONNECT) { conn->state = BT_CLOSED; + mgmt_connect_failed(hdev, &cp->peer_addr, conn->type, + conn->dst_type, status); hci_proto_connect_cfm(conn, status); hci_conn_del(conn); } @@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * if (!num_rsp) return; + if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) + return; + hci_dev_lock(hdev); for (; num_rsp; num_rsp--, info++) { @@ -2040,7 +2063,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); if (ev->status && conn->state == BT_CONNECTED) { - hci_acl_disconn(conn, 0x13); + hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_put(conn); goto unlock; } @@ -2154,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk hci_cc_inquiry_cancel(hdev, skb); break; + case HCI_OP_PERIODIC_INQ: + hci_cc_periodic_inq(hdev, skb); + break; + case HCI_OP_EXIT_PERIODIC_INQ: hci_cc_exit_periodic_inq(hdev, skb); break; @@ -2806,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct if (!num_rsp) return; + if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) + return; + hci_dev_lock(hdev); if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { @@ -2971,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct struct inquiry_data data; struct extended_inquiry_info *info = (void *) (skb->data + 1); int num_rsp = *((__u8 *) skb->data); + size_t eir_len; BT_DBG("%s num_rsp %d", hdev->name, num_rsp); if (!num_rsp) return; + if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) + return; + hci_dev_lock(hdev); for (; num_rsp; num_rsp--, info++) { @@ -3000,11 +3034,56 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct name_known = hci_inquiry_cache_update(hdev, &data, name_known, &ssp); + eir_len = eir_get_length(info->data, sizeof(info->data)); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, !name_known, - ssp, info->data, sizeof(info->data)); + ssp, info->data, eir_len); + } + + hci_dev_unlock(hdev); +} + +static void hci_key_refresh_complete_evt(struct hci_dev *hdev, + struct 
sk_buff *skb) +{ + struct hci_ev_key_refresh_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %u handle %u", hdev->name, ev->status, + __le16_to_cpu(ev->handle)); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (!ev->status) + conn->sec_level = conn->pending_sec_level; + + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + + if (ev->status && conn->state == BT_CONNECTED) { + hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_put(conn); + goto unlock; + } + + if (conn->state == BT_CONFIG) { + if (!ev->status) + conn->state = BT_CONNECTED; + + hci_proto_connect_cfm(conn, ev->status); + hci_conn_put(conn); + } else { + hci_auth_cfm(conn, ev->status); + + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_put(conn); } +unlock: hci_dev_unlock(hdev); } @@ -3322,8 +3401,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev, while (num_reports--) { struct hci_ev_le_advertising_info *ev = ptr; - hci_add_adv_entry(hdev, ev); - rssi = ev->data[ev->length]; mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, NULL, rssi, 0, 1, ev->data, ev->length); @@ -3343,7 +3420,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, struct hci_conn *conn; struct smp_ltk *ltk; - BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle)); + BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); @@ -3526,6 +3603,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_extended_inquiry_result_evt(hdev, skb); break; + case HCI_EV_KEY_REFRESH_COMPLETE: + hci_key_refresh_complete_evt(hdev, skb); + break; + case HCI_EV_IO_CAPA_REQUEST: hci_io_capa_request_evt(hdev, skb); break; diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index bc154298979..937f3187eaf 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = { static void print_bt_uuid(struct seq_file *f, u8 *uuid) { - u32 data0, data4; - u16 data1, data2, data3, data5; + __be32 data0, data4; + __be16 data1, data2, data3, data5; memcpy(&data0, &uuid[0], 4); memcpy(&data1, &uuid[4], 2); @@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev) BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - dev->parent = hdev->parent; dev_set_name(dev, "%s", hdev->name); err = device_add(dev); diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index 4deaca78e91..9332bc7aa85 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig @@ -1,6 +1,6 @@ config BT_HIDP tristate "HIDP protocol support" - depends on BT && INPUT && HID_SUPPORT + depends on BT && INPUT select HID help HIDP (Human Interface Device Protocol) is a transport layer diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index d478be11d56..2c20d765b39 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1195,41 +1195,16 @@ int hidp_get_conninfo(struct hidp_conninfo *ci) return err; } -static const struct hid_device_id hidp_table[] = { - { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) }, - { } -}; - -static struct hid_driver hidp_driver = { - .name = "generic-bluetooth", - .id_table = hidp_table, -}; - static int __init hidp_init(void) { - int ret; - BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); - ret = hid_register_driver(&hidp_driver); - if (ret) - goto err; - - ret = hidp_init_sockets(); - if 
(ret) - goto err_drv; - - return 0; -err_drv: - hid_unregister_driver(&hidp_driver); -err: - return ret; + return hidp_init_sockets(); } static void __exit hidp_exit(void) { hidp_cleanup_sockets(); - hid_unregister_driver(&hidp_driver); } module_init(hidp_init); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 6f9c25b633a..4554e80d16a 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4,6 +4,7 @@ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> Copyright (C) 2010 Google Inc. Copyright (C) 2011 ProFUSION Embedded Systems + Copyright (c) 2012 Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -70,7 +71,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data); static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); static void l2cap_send_disconn_req(struct l2cap_conn *conn, - struct l2cap_chan *chan, int err); + struct l2cap_chan *chan, int err); /* ---- L2CAP channels ---- */ @@ -97,13 +98,15 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 } /* Find channel with given SCID. - * Returns locked socket */ + * Returns locked channel. */ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); + if (c) + l2cap_chan_lock(c); mutex_unlock(&conn->chan_lock); return c; @@ -120,17 +123,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 return NULL; } -static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) -{ - struct l2cap_chan *c; - - mutex_lock(&conn->chan_lock); - c = __l2cap_get_chan_by_ident(conn, ident); - mutex_unlock(&conn->chan_lock); - - return c; -} - static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) { struct l2cap_chan *c; @@ -232,6 +224,124 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) release_sock(sk); } +/* ---- L2CAP sequence number lists ---- */ + +/* For ERTM, ordered lists of sequence numbers must be tracked for + * SREJ requests that are received and for frames that are to be + * retransmitted. These seq_list functions implement a singly-linked + * list in an array, where membership in the list can also be checked + * in constant time. Items can also be added to the tail of the list + * and removed from the head in constant time, without further memory + * allocs or frees. + */ + +static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) +{ + size_t alloc_size, i; + + /* Allocated size is a power of 2 to map sequence numbers + * (which may be up to 14 bits) in to a smaller array that is + * sized for the negotiated ERTM transmit windows. 
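
The comment above describes the l2cap_seq_list technique: a singly-linked list threaded through an array indexed by sequence number, giving O(1) membership tests, tail appends and head pops with no per-node allocation. A compilable userspace sketch of the same idea; the sentinel values and the 16-slot table are assumptions, and it relies on no two tracked numbers colliding under the mask, which the kernel guarantees by sizing the table to the ERTM window:

#include <stdio.h>

#define SEQ_CLEAR 0xffffu       /* slot unused / list empty */
#define SEQ_TAIL  0xfffeu       /* marks the last element */
#define MASK      0x0fu         /* power-of-two table of 16 slots */

static unsigned int slots[MASK + 1];
static unsigned int head = SEQ_CLEAR, tail = SEQ_CLEAR;

static void seq_init(void)
{
        unsigned int i;

        for (i = 0; i <= MASK; i++)
                slots[i] = SEQ_CLEAR;
}

static int seq_contains(unsigned int seq)
{
        return slots[seq & MASK] != SEQ_CLEAR;  /* O(1) membership */
}

static void seq_append(unsigned int seq)
{
        if (seq_contains(seq))
                return;                         /* already queued */
        if (tail == SEQ_CLEAR)
                head = seq;                     /* list was empty */
        else
                slots[tail & MASK] = seq;       /* link old tail to us */
        tail = seq;
        slots[seq & MASK] = SEQ_TAIL;
}

static unsigned int seq_pop(void)
{
        unsigned int seq = head;

        if (head == SEQ_CLEAR)
                return SEQ_CLEAR;               /* empty list */
        head = slots[seq & MASK];
        slots[seq & MASK] = SEQ_CLEAR;
        if (head == SEQ_TAIL)                   /* removed last entry */
                head = tail = SEQ_CLEAR;
        return seq;
}

int main(void)
{
        seq_init();
        seq_append(5);
        seq_append(9);
        seq_append(2);
        printf("contains 9: %d\n", seq_contains(9));    /* 1 */
        while (head != SEQ_CLEAR)
                printf("popped %u\n", seq_pop());       /* 5, 9, 2 */
        return 0;
}
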
+ */ + alloc_size = roundup_pow_of_two(size); + + seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL); + if (!seq_list->list) + return -ENOMEM; + + seq_list->mask = alloc_size - 1; + seq_list->head = L2CAP_SEQ_LIST_CLEAR; + seq_list->tail = L2CAP_SEQ_LIST_CLEAR; + for (i = 0; i < alloc_size; i++) + seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; + + return 0; +} + +static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) +{ + kfree(seq_list->list); +} + +static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, + u16 seq) +{ + /* Constant-time check for list membership */ + return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; +} + +static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq) +{ + u16 mask = seq_list->mask; + + if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) { + /* In case someone tries to pop the head of an empty list */ + return L2CAP_SEQ_LIST_CLEAR; + } else if (seq_list->head == seq) { + /* Head can be removed in constant time */ + seq_list->head = seq_list->list[seq & mask]; + seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; + + if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { + seq_list->head = L2CAP_SEQ_LIST_CLEAR; + seq_list->tail = L2CAP_SEQ_LIST_CLEAR; + } + } else { + /* Walk the list to find the sequence number */ + u16 prev = seq_list->head; + while (seq_list->list[prev & mask] != seq) { + prev = seq_list->list[prev & mask]; + if (prev == L2CAP_SEQ_LIST_TAIL) + return L2CAP_SEQ_LIST_CLEAR; + } + + /* Unlink the number from the list and clear it */ + seq_list->list[prev & mask] = seq_list->list[seq & mask]; + seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; + if (seq_list->tail == seq) + seq_list->tail = prev; + } + return seq; +} + +static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) +{ + /* Remove the head in constant time */ + return l2cap_seq_list_remove(seq_list, seq_list->head); +} + +static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) +{ + u16 i; + + if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) + return; + + for (i = 0; i <= seq_list->mask; i++) + seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; + + seq_list->head = L2CAP_SEQ_LIST_CLEAR; + seq_list->tail = L2CAP_SEQ_LIST_CLEAR; +} + +static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) +{ + u16 mask = seq_list->mask; + + /* All appends happen in constant time */ + + if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) + return; + + if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) + seq_list->head = seq; + else + seq_list->list[seq_list->tail & mask] = seq; + + seq_list->tail = seq; + seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; +} + static void l2cap_chan_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, @@ -262,7 +372,7 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_put(chan); } -struct l2cap_chan *l2cap_chan_create(struct sock *sk) +struct l2cap_chan *l2cap_chan_create(void) { struct l2cap_chan *chan; @@ -272,8 +382,6 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk) mutex_init(&chan->lock); - chan->sk = sk; - write_lock(&chan_list_lock); list_add(&chan->global_l, &chan_list); write_unlock(&chan_list_lock); @@ -284,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk) atomic_set(&chan->refcnt, 1); - BT_DBG("sk %p chan %p", sk, chan); + BT_DBG("chan %p", chan); return chan; } @@ -298,10 +406,21 @@ void l2cap_chan_destroy(struct l2cap_chan *chan) l2cap_chan_put(chan); } -void __l2cap_chan_add(struct 
l2cap_conn *conn, struct l2cap_chan *chan) +void l2cap_chan_set_defaults(struct l2cap_chan *chan) +{ + chan->fcs = L2CAP_FCS_CRC16; + chan->max_tx = L2CAP_DEFAULT_MAX_TX; + chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; + chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; + chan->sec_level = BT_SECURITY_LOW; + + set_bit(FLAG_FORCE_ACTIVE, &chan->flags); +} + +static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, - chan->psm, chan->dcid); + __le16_to_cpu(chan->psm), chan->dcid); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; @@ -347,7 +466,7 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) list_add(&chan->list, &conn->chan_l); } -void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { mutex_lock(&conn->chan_lock); __l2cap_chan_add(conn, chan); @@ -405,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err) skb_queue_purge(&chan->srej_q); + l2cap_seq_list_free(&chan->srej_list); + l2cap_seq_list_free(&chan->retrans_list); list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { list_del(&l->list); kfree(l); @@ -453,7 +574,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) case BT_CONFIG: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && conn->hcon->type == ACL_LINK) { - __clear_chan_timer(chan); __set_chan_timer(chan, sk->sk_sndtimeo); l2cap_send_disconn_req(conn, chan, reason); } else @@ -466,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) struct l2cap_conn_rsp rsp; __u16 result; - if (bt_sk(sk)->defer_setup) + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; @@ -599,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) hci_send_acl(chan->conn->hchan, skb, flags); } +static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control) +{ + control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; + control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT; + + if (enh & L2CAP_CTRL_FRAME_TYPE) { + /* S-Frame */ + control->sframe = 1; + control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT; + control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; + + control->sar = 0; + control->txseq = 0; + } else { + /* I-Frame */ + control->sframe = 0; + control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; + control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; + + control->poll = 0; + control->super = 0; + } +} + +static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control) +{ + control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT; + control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT; + + if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) { + /* S-Frame */ + control->sframe = 1; + control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT; + control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT; + + control->sar = 0; + control->txseq = 0; + } else { + /* I-Frame */ + control->sframe = 0; + control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; + control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT; + + control->poll = 0; + control->super = 0; + } +} + +static inline void __unpack_control(struct l2cap_chan *chan, + struct sk_buff *skb) +{ + if (test_bit(FLAG_EXT_CTRL, 
&chan->flags)) { + __unpack_extended_control(get_unaligned_le32(skb->data), + &bt_cb(skb)->control); + } else { + __unpack_enhanced_control(get_unaligned_le16(skb->data), + &bt_cb(skb)->control); + } +} + +static u32 __pack_extended_control(struct l2cap_ctrl *control) +{ + u32 packed; + + packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; + packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; + + if (control->sframe) { + packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; + packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; + packed |= L2CAP_EXT_CTRL_FRAME_TYPE; + } else { + packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; + packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; + } + + return packed; +} + +static u16 __pack_enhanced_control(struct l2cap_ctrl *control) +{ + u16 packed; + + packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; + packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; + + if (control->sframe) { + packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; + packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; + packed |= L2CAP_CTRL_FRAME_TYPE; + } else { + packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; + packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; + } + + return packed; +} + +static inline void __pack_control(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { + put_unaligned_le32(__pack_extended_control(control), + skb->data + L2CAP_HDR_SIZE); + } else { + put_unaligned_le16(__pack_enhanced_control(control), + skb->data + L2CAP_HDR_SIZE); + } +} + static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) { struct sk_buff *skb; @@ -681,10 +912,38 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan) l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); } +static void l2cap_chan_ready(struct l2cap_chan *chan) +{ + struct sock *sk = chan->sk; + struct sock *parent; + + lock_sock(sk); + + parent = bt_sk(sk)->parent; + + BT_DBG("sk %p, parent %p", sk, parent); + + chan->conf_state = 0; + __clear_chan_timer(chan); + + __l2cap_state_change(chan, BT_CONNECTED); + sk->sk_state_change(sk); + + if (parent) + parent->sk_data_ready(parent, 0); + + release_sock(sk); +} + static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; + if (conn->hcon->type == LE_LINK) { + l2cap_chan_ready(chan); + return; + } + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; @@ -791,7 +1050,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn) if (l2cap_chan_check_security(chan)) { lock_sock(sk); - if (bt_sk(sk)->defer_setup) { + if (test_bit(BT_SK_DEFER_SETUP, + &bt_sk(sk)->flags)) { struct sock *parent = bt_sk(sk)->parent; rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); @@ -830,10 +1090,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn) mutex_unlock(&conn->chan_lock); } -/* Find socket with cid and source bdaddr. +/* Find socket with cid and source/destination bdaddr. * Returns closest match, locked. 
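
__pack_enhanced_control() and its unpack counterpart above fold the ERTM state into the 16-bit enhanced control field: reqseq, the final bit and the frame-type bit are common to both frame kinds, while poll/supervise apply only to S-frames and sar/txseq only to I-frames. A standalone round-trip sketch; the mask and shift values follow the enhanced control field layout, with the kernel's own macros defined in l2cap.h:

#include <stdio.h>

#define CTRL_FRAME_TYPE 0x0001u /* 0 = I-frame, 1 = S-frame */
#define CTRL_TXSEQ      0x007eu
#define CTRL_SUPERVISE  0x000cu
#define CTRL_POLL       0x0010u
#define CTRL_FINAL      0x0080u
#define CTRL_REQSEQ     0x3f00u
#define CTRL_SAR        0xc000u

struct ctrl {
        unsigned int sframe, sar, super, poll, final, txseq, reqseq;
};

/* Shared bits first, then the S-frame or I-frame specific fields,
 * as in __pack_enhanced_control(). */
static unsigned int pack(const struct ctrl *c)
{
        unsigned int v = (c->reqseq << 8) | (c->final << 7);

        if (c->sframe)
                v |= (c->poll << 4) | (c->super << 2) | CTRL_FRAME_TYPE;
        else
                v |= (c->sar << 14) | (c->txseq << 1);
        return v;
}

static void unpack(unsigned int v, struct ctrl *c)
{
        c->reqseq = (v & CTRL_REQSEQ) >> 8;
        c->final = (v & CTRL_FINAL) >> 7;
        c->sframe = v & CTRL_FRAME_TYPE;
        if (c->sframe) {
                c->poll = (v & CTRL_POLL) >> 4;
                c->super = (v & CTRL_SUPERVISE) >> 2;
                c->sar = c->txseq = 0;
        } else {
                c->sar = (v & CTRL_SAR) >> 14;
                c->txseq = (v & CTRL_TXSEQ) >> 1;
                c->poll = c->super = 0;
        }
}

int main(void)
{
        struct ctrl in = { .sframe = 0, .sar = 1, .txseq = 17,
                           .reqseq = 42, .final = 1 }, out;

        unpack(pack(&in), &out);
        printf("txseq %u reqseq %u sar %u final %u\n",
               out.txseq, out.reqseq, out.sar, out.final);
        return 0;
}
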
*/ -static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src) +static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid, + bdaddr_t *src, + bdaddr_t *dst) { struct l2cap_chan *c, *c1 = NULL; @@ -846,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd continue; if (c->scid == cid) { + int src_match, dst_match; + int src_any, dst_any; + /* Exact match. */ - if (!bacmp(&bt_sk(sk)->src, src)) { + src_match = !bacmp(&bt_sk(sk)->src, src); + dst_match = !bacmp(&bt_sk(sk)->dst, dst); + if (src_match && dst_match) { read_unlock(&chan_list_lock); return c; } /* Closest match */ - if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) + src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY); + dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY); + if ((src_match && dst_any) || (src_any && dst_match) || + (src_any && dst_any)) c1 = c; } } @@ -872,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) /* Check if we have socket listening on cid */ pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA, - conn->src); + conn->src, conn->dst); if (!pchan) return; @@ -910,29 +1180,6 @@ clean: release_sock(parent); } -static void l2cap_chan_ready(struct l2cap_chan *chan) -{ - struct sock *sk = chan->sk; - struct sock *parent; - - lock_sock(sk); - - parent = bt_sk(sk)->parent; - - BT_DBG("sk %p, parent %p", sk, parent); - - chan->conf_state = 0; - __clear_chan_timer(chan); - - __l2cap_state_change(chan, BT_CONNECTED); - sk->sk_state_change(sk); - - if (parent) - parent->sk_data_ready(parent, 0); - - release_sock(sk); -} - static void l2cap_conn_ready(struct l2cap_conn *conn) { struct l2cap_chan *chan; @@ -1016,6 +1263,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) /* Kill channels */ list_for_each_entry_safe(chan, l, &conn->chan_l, list) { + l2cap_chan_hold(chan); l2cap_chan_lock(chan); l2cap_chan_del(chan, err); @@ -1023,6 +1271,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_unlock(chan); chan->ops->close(chan->data); + l2cap_chan_put(chan); } mutex_unlock(&conn->chan_lock); @@ -1046,7 +1295,12 @@ static void security_timeout(struct work_struct *work) struct l2cap_conn *conn = container_of(work, struct l2cap_conn, security_timer.work); - l2cap_conn_del(conn->hcon, ETIMEDOUT); + BT_DBG("conn %p", conn); + + if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { + smp_chan_destroy(conn); + l2cap_conn_del(conn->hcon, ETIMEDOUT); + } } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) @@ -1100,10 +1354,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) /* ---- Socket interface ---- */ -/* Find socket with psm and source bdaddr. +/* Find socket with psm and source / destination bdaddr. * Returns closest match. */ -static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src) +static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, + bdaddr_t *src, + bdaddr_t *dst) { struct l2cap_chan *c, *c1 = NULL; @@ -1116,14 +1372,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr continue; if (c->psm == psm) { + int src_match, dst_match; + int src_any, dst_any; + /* Exact match. 
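
The channel lookups above now match on both source and destination addresses: an exact src+dst match returns immediately, while entries whose unmatched side is BDADDR_ANY are remembered as the closest fallback. A small sketch of that preference order, using integers with 0 standing in for BDADDR_ANY:

#include <stdio.h>

#define ANY 0   /* stands in for BDADDR_ANY */

struct chan { int src, dst, scid; };

/* Mirrors the preference order of l2cap_global_chan_by_scid():
 * an exact src+dst match wins outright; otherwise any entry whose
 * non-matching side is a wildcard is kept as the fallback. */
static struct chan *lookup(struct chan *c, int n, int scid, int src, int dst)
{
        struct chan *best = NULL;
        int i;

        for (i = 0; i < n; i++) {
                int src_match, dst_match, src_any, dst_any;

                if (c[i].scid != scid)
                        continue;

                src_match = c[i].src == src;
                dst_match = c[i].dst == dst;
                if (src_match && dst_match)
                        return &c[i];           /* exact match */

                src_any = c[i].src == ANY;
                dst_any = c[i].dst == ANY;
                if ((src_match && dst_any) || (src_any && dst_match) ||
                    (src_any && dst_any))
                        best = &c[i];           /* closest so far */
        }
        return best;
}

int main(void)
{
        struct chan chans[] = {
                { ANY, ANY, 4 },        /* wildcard listener */
                { 10, 20, 4 },          /* bound to one link */
        };
        struct chan *c = lookup(chans, 2, 4, 10, 20);

        printf("src %d dst %d\n", c->src, c->dst);      /* 10 20 */
        return 0;
}
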
*/ - if (!bacmp(&bt_sk(sk)->src, src)) { + src_match = !bacmp(&bt_sk(sk)->src, src); + dst_match = !bacmp(&bt_sk(sk)->dst, dst); + if (src_match && dst_match) { read_unlock(&chan_list_lock); return c; } /* Closest match */ - if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) + src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY); + dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY); + if ((src_match && dst_any) || (src_any && dst_match) || + (src_any && dst_any)) c1 = c; } } @@ -1133,7 +1397,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr return c1; } -int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst) +int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, + bdaddr_t *dst, u8 dst_type) { struct sock *sk = chan->sk; bdaddr_t *src = &bt_sk(sk)->src; @@ -1143,8 +1408,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d __u8 auth_type; int err; - BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), - chan->psm); + BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst), + dst_type, __le16_to_cpu(chan->psm)); hdev = hci_get_route(dst, src); if (!hdev) @@ -1218,11 +1483,11 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d auth_type = l2cap_get_auth_type(chan); if (chan->dcid == L2CAP_CID_LE_DATA) - hcon = hci_connect(hdev, LE_LINK, dst, - chan->sec_level, auth_type); + hcon = hci_connect(hdev, LE_LINK, dst, dst_type, + chan->sec_level, auth_type); else - hcon = hci_connect(hdev, ACL_LINK, dst, - chan->sec_level, auth_type); + hcon = hci_connect(hdev, ACL_LINK, dst, dst_type, + chan->sec_level, auth_type); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); @@ -1236,6 +1501,18 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d goto done; } + if (hcon->type == LE_LINK) { + err = 0; + + if (!list_empty(&conn->chan_l)) { + err = -EBUSY; + hci_conn_put(hcon); + } + + if (err) + goto done; + } + /* Update source addr of the socket */ bacpy(src, conn->src); @@ -1346,7 +1623,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan) while ((skb = skb_peek(&chan->tx_q)) && chan->unacked_frames) { - if (bt_cb(skb)->tx_seq == chan->expected_ack_seq) + if (bt_cb(skb)->control.txseq == chan->expected_ack_seq) break; skb = skb_dequeue(&chan->tx_q); @@ -1368,6 +1645,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan) while ((skb = skb_dequeue(&chan->tx_q))) { control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); control |= __set_txseq(chan, chan->next_tx_seq); + control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { @@ -1393,21 +1671,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) if (!skb) return; - while (bt_cb(skb)->tx_seq != tx_seq) { + while (bt_cb(skb)->control.txseq != tx_seq) { if (skb_queue_is_last(&chan->tx_q, skb)) return; skb = skb_queue_next(&chan->tx_q, skb); } - if (chan->remote_max_tx && - bt_cb(skb)->retries == chan->remote_max_tx) { + if (bt_cb(skb)->control.retries == chan->remote_max_tx && + chan->remote_max_tx) { l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); return; } tx_skb = skb_clone(skb, GFP_ATOMIC); - bt_cb(skb)->retries++; + bt_cb(skb)->control.retries++; control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); control &= __get_sar_mask(chan); @@ -1440,17 +1718,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) if (chan->state != BT_CONNECTED) 
return -ENOTCONN; + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + return 0; + while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { - if (chan->remote_max_tx && - bt_cb(skb)->retries == chan->remote_max_tx) { + if (bt_cb(skb)->control.retries == chan->remote_max_tx && + chan->remote_max_tx) { l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); break; } tx_skb = skb_clone(skb, GFP_ATOMIC); - bt_cb(skb)->retries++; + bt_cb(skb)->control.retries++; control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); control &= __get_sar_mask(chan); @@ -1460,6 +1741,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) control |= __set_reqseq(chan, chan->buffer_seq); control |= __set_txseq(chan, chan->next_tx_seq); + control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); @@ -1474,11 +1756,11 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) __set_retrans_timer(chan); - bt_cb(skb)->tx_seq = chan->next_tx_seq; + bt_cb(skb)->control.txseq = chan->next_tx_seq; chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); - if (bt_cb(skb)->retries == 1) { + if (bt_cb(skb)->control.retries == 1) { chan->unacked_frames++; if (!nsent++) @@ -1554,7 +1836,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, { struct l2cap_conn *conn = chan->conn; struct sk_buff **frag; - int err, sent = 0; + int sent = 0; if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) return -EFAULT; @@ -1565,14 +1847,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, /* Continuation fragments (no L2CAP header) */ frag = &skb_shinfo(skb)->frag_list; while (len) { + struct sk_buff *tmp; + count = min_t(unsigned int, conn->mtu, len); - *frag = chan->ops->alloc_skb(chan, count, - msg->msg_flags & MSG_DONTWAIT, - &err); + tmp = chan->ops->alloc_skb(chan, count, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + + *frag = tmp; - if (!*frag) - return err; if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) return -EFAULT; @@ -1581,6 +1866,9 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, sent += count; len -= count; + skb->len += (*frag)->len; + skb->data_len += (*frag)->len; + frag = &(*frag)->next; } @@ -1601,18 +1889,17 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, count + hlen, - msg->msg_flags & MSG_DONTWAIT, &err); - - if (!skb) - return ERR_PTR(err); + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; skb->priority = priority; /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); - lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); - put_unaligned_le16(chan->psm, skb_put(skb, 2)); + lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE); + put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { @@ -1628,25 +1915,24 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; - int err, count, hlen = L2CAP_HDR_SIZE; + int err, count; struct l2cap_hdr *lh; BT_DBG("chan %p len %d", chan, (int)len); - count = min_t(unsigned int, (conn->mtu - hlen), len); + count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); - skb = chan->ops->alloc_skb(chan, count + hlen, - msg->msg_flags & 
MSG_DONTWAIT, &err); - - if (!skb) - return ERR_PTR(err); + skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; skb->priority = priority; /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); - lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + lh->len = cpu_to_le16(len); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { @@ -1658,7 +1944,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, - u32 control, u16 sdulen) + u16 sdulen) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; @@ -1684,17 +1970,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, count + hlen, - msg->msg_flags & MSG_DONTWAIT, &err); - - if (!skb) - return ERR_PTR(err); + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); - __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); + __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); @@ -1708,61 +1993,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, if (chan->fcs == L2CAP_FCS_CRC16) put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); - bt_cb(skb)->retries = 0; + bt_cb(skb)->control.retries = 0; return skb; } -static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +static int l2cap_segment_sdu(struct l2cap_chan *chan, + struct sk_buff_head *seg_queue, + struct msghdr *msg, size_t len) { struct sk_buff *skb; - struct sk_buff_head sar_queue; - u32 control; - size_t size = 0; + u16 sdu_len; + size_t pdu_len; + int err = 0; + u8 sar; - skb_queue_head_init(&sar_queue); - control = __set_ctrl_sar(chan, L2CAP_SAR_START); - skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); - if (IS_ERR(skb)) - return PTR_ERR(skb); + BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); - __skb_queue_tail(&sar_queue, skb); - len -= chan->remote_mps; - size += chan->remote_mps; + /* It is critical that ERTM PDUs fit in a single HCI fragment, + * so fragmented skbs are not used. The HCI layer's handling + * of fragmented skbs is not compatible with ERTM's queueing. + */ - while (len > 0) { - size_t buflen; + /* PDU size is derived from the HCI MTU */ + pdu_len = chan->conn->mtu; - if (len > chan->remote_mps) { - control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); - buflen = chan->remote_mps; - } else { - control = __set_ctrl_sar(chan, L2CAP_SAR_END); - buflen = len; - } + pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); + + /* Adjust for largest possible L2CAP overhead. 
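
The alloc_skb callback conversions above replace the old int out-parameter with errors encoded in the returned pointer, the kernel's usual ERR_PTR()/IS_ERR()/PTR_ERR() convention from <linux/err.h>. A simplified userspace model of that convention and of an allocator using it; alloc_buf and its size limit are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Simplified stand-ins for the kernel's helpers: small negative errno
 * values are folded into the very top of the address space, where no
 * valid pointer can live. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long err)     { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* An allocator in the style of the reworked alloc_skb callback:
 * failure is reported in the returned pointer, no out-parameter. */
static void *alloc_buf(size_t len)
{
        void *p;

        if (len > 1024)
                return ERR_PTR(-EMSGSIZE);      /* reject oversize */
        p = malloc(len);
        if (!p)
                return ERR_PTR(-ENOMEM);
        return p;
}

int main(void)
{
        void *buf = alloc_buf(4096);

        if (IS_ERR(buf))
                printf("alloc failed: %ld\n", PTR_ERR(buf));
        else
                free(buf);
        return 0;
}
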
*/ + pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; + + /* Remote device may have requested smaller PDUs */ + pdu_len = min_t(size_t, pdu_len, chan->remote_mps); + + if (len <= pdu_len) { + sar = L2CAP_SAR_UNSEGMENTED; + sdu_len = 0; + pdu_len = len; + } else { + sar = L2CAP_SAR_START; + sdu_len = len; + pdu_len -= L2CAP_SDULEN_SIZE; + } + + while (len > 0) { + skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len); - skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0); if (IS_ERR(skb)) { - skb_queue_purge(&sar_queue); + __skb_queue_purge(seg_queue); return PTR_ERR(skb); } - __skb_queue_tail(&sar_queue, skb); - len -= buflen; - size += buflen; + bt_cb(skb)->control.sar = sar; + __skb_queue_tail(seg_queue, skb); + + len -= pdu_len; + if (sdu_len) { + sdu_len = 0; + pdu_len += L2CAP_SDULEN_SIZE; + } + + if (len <= pdu_len) { + sar = L2CAP_SAR_END; + pdu_len = len; + } else { + sar = L2CAP_SAR_CONTINUE; + } } - skb_queue_splice_tail(&sar_queue, &chan->tx_q); - if (chan->tx_send_head == NULL) - chan->tx_send_head = sar_queue.next; - return size; + return err; } int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u32 priority) { struct sk_buff *skb; - u32 control; int err; + struct sk_buff_head seg_queue; /* Connectionless channel */ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { @@ -1791,42 +2097,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: - /* Entire SDU fits into one PDU */ - if (len <= chan->remote_mps) { - control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); - skb = l2cap_create_iframe_pdu(chan, msg, len, control, - 0); - if (IS_ERR(skb)) - return PTR_ERR(skb); + /* Check outgoing MTU */ + if (len > chan->omtu) { + err = -EMSGSIZE; + break; + } - __skb_queue_tail(&chan->tx_q, skb); + __skb_queue_head_init(&seg_queue); - if (chan->tx_send_head == NULL) - chan->tx_send_head = skb; + /* Do segmentation before calling in to the state machine, + * since it's possible to block while waiting for memory + * allocation. + */ + err = l2cap_segment_sdu(chan, &seg_queue, msg, len); - } else { - /* Segment SDU into multiples PDUs */ - err = l2cap_sar_segment_sdu(chan, msg, len); - if (err < 0) - return err; + /* The channel could have been closed while segmenting, + * check that it is still connected. + */ + if (chan->state != BT_CONNECTED) { + __skb_queue_purge(&seg_queue); + err = -ENOTCONN; } - if (chan->mode == L2CAP_MODE_STREAMING) { - l2cap_streaming_send(chan); - err = len; + if (err) break; - } - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && - test_bit(CONN_WAIT_F, &chan->conn_state)) { - err = len; - break; - } + if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL) + chan->tx_send_head = seg_queue.next; + skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); + + if (chan->mode == L2CAP_MODE_ERTM) + err = l2cap_ertm_send(chan); + else + l2cap_streaming_send(chan); - err = l2cap_ertm_send(chan); if (err >= 0) err = len; + /* If the skbs were not queued for sending, they'll still be in + * seg_queue and need to be purged. 
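
l2cap_segment_sdu() above sizes each PDU from the HCI MTU less the worst-case extended header and FCS, caps it at the remote MPS, and then emits either one unsegmented PDU or a START PDU (which alone carries the 2-byte SDU length) followed by CONTINUE and END PDUs. A sketch of the same control flow that just prints the fragment plan; the header sizes are illustrative stand-ins for the l2cap.h constants:

#include <stdio.h>

#define EXT_HDR_SIZE 8
#define FCS_SIZE     2
#define SDULEN_SIZE  2

enum sar { SAR_UNSEG, SAR_START, SAR_CONTINUE, SAR_END };

static const char *names[] = { "unsegmented", "start", "continue", "end" };

/* Walk an SDU of 'len' bytes, printing one line per PDU, with the
 * same START/CONTINUE/END transitions as l2cap_segment_sdu(). */
static void segment(size_t len, size_t mtu, size_t remote_mps)
{
        size_t pdu_len = mtu - EXT_HDR_SIZE - FCS_SIZE;
        enum sar sar;

        if (pdu_len > remote_mps)
                pdu_len = remote_mps;   /* remote asked for smaller PDUs */

        if (len <= pdu_len) {
                sar = SAR_UNSEG;
        } else {
                sar = SAR_START;
                pdu_len -= SDULEN_SIZE; /* START also carries SDU length */
        }

        while (len > 0) {
                size_t chunk = len < pdu_len ? len : pdu_len;

                printf("%-11s %zu bytes\n", names[sar], chunk);
                len -= chunk;

                if (sar == SAR_START)
                        pdu_len += SDULEN_SIZE; /* later PDUs regain it */
                sar = len <= pdu_len ? SAR_END : SAR_CONTINUE;
        }
}

int main(void)
{
        segment(1500, 672, 1000);       /* 3 PDUs: start/continue/end */
        return 0;
}
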
+ */ + __skb_queue_purge(&seg_queue); break; default: @@ -2040,13 +2351,29 @@ static void l2cap_ack_timeout(struct work_struct *work) l2cap_chan_put(chan); } -static inline void l2cap_ertm_init(struct l2cap_chan *chan) +static inline int l2cap_ertm_init(struct l2cap_chan *chan) { + int err; + + chan->next_tx_seq = 0; + chan->expected_tx_seq = 0; chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; chan->num_acked = 0; chan->frames_sent = 0; + chan->last_acked_seq = 0; + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + + skb_queue_head_init(&chan->tx_q); + + if (chan->mode != L2CAP_MODE_ERTM) + return 0; + + chan->rx_state = L2CAP_RX_STATE_RECV; + chan->tx_state = L2CAP_TX_STATE_XMIT; INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); @@ -2055,6 +2382,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan) skb_queue_head_init(&chan->srej_q); INIT_LIST_HEAD(&chan->srej_l); + err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); + if (err < 0) + return err; + + return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); } static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) @@ -2378,9 +2710,9 @@ done: chan->remote_mps = size; rfc.retrans_timeout = - le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); + __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); rfc.monitor_timeout = - le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); + __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); set_bit(CONF_MODE_DONE, &chan->conf_state); @@ -2583,12 +2915,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); - switch (type) { - case L2CAP_CONF_RFC: - if (olen == sizeof(rfc)) - memcpy(&rfc, (void *)val, olen); - goto done; - } + if (type != L2CAP_CONF_RFC) + continue; + + if (olen != sizeof(rfc)) + break; + + memcpy(&rfc, (void *)val, olen); + goto done; } /* Use sane default values in case a misbehaving remote device @@ -2644,10 +2978,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd u16 dcid = 0, scid = __le16_to_cpu(req->scid); __le16 psm = req->psm; - BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); + BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid); /* Check if we have socket listening on psm */ - pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src); + pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst); if (!pchan) { result = L2CAP_CR_BAD_PSM; goto sendresp; @@ -2706,7 +3040,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { if (l2cap_chan_check_security(chan)) { - if (bt_sk(sk)->defer_setup) { + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { __l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_AUTHOR_PEND; @@ -2848,7 +3182,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr u16 dcid, flags; u8 rsp[64]; struct l2cap_chan *chan; - int len; + int len, err = 0; dcid = __le16_to_cpu(req->dcid); flags = __le16_to_cpu(req->flags); @@ -2859,8 +3193,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (!chan) return -ENOENT; - l2cap_chan_lock(chan); - if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { struct l2cap_cmd_rej_cid rej; @@ -2915,13 +3247,15 @@ static inline int 
l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr l2cap_state_change(chan, BT_CONNECTED); - chan->next_tx_seq = 0; - chan->expected_tx_seq = 0; - skb_queue_head_init(&chan->tx_q); - if (chan->mode == L2CAP_MODE_ERTM) - l2cap_ertm_init(chan); + if (chan->mode == L2CAP_MODE_ERTM || + chan->mode == L2CAP_MODE_STREAMING) + err = l2cap_ertm_init(chan); + + if (err < 0) + l2cap_send_disconn_req(chan->conn, chan, -err); + else + l2cap_chan_ready(chan); - l2cap_chan_ready(chan); goto unlock; } @@ -2949,7 +3283,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr unlock: l2cap_chan_unlock(chan); - return 0; + return err; } static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) @@ -2957,21 +3291,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; u16 scid, flags, result; struct l2cap_chan *chan; - int len = cmd->len - sizeof(*rsp); + int len = le16_to_cpu(cmd->len) - sizeof(*rsp); + int err = 0; scid = __le16_to_cpu(rsp->scid); flags = __le16_to_cpu(rsp->flags); result = __le16_to_cpu(rsp->result); - BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", - scid, flags, result); + BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags, + result, len); chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) return 0; - l2cap_chan_lock(chan); - switch (result) { case L2CAP_CONF_SUCCESS: l2cap_conf_rfc_get(chan, rsp->data, len); @@ -3045,18 +3378,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr set_default_fcs(chan); l2cap_state_change(chan, BT_CONNECTED); - chan->next_tx_seq = 0; - chan->expected_tx_seq = 0; - skb_queue_head_init(&chan->tx_q); - if (chan->mode == L2CAP_MODE_ERTM) - l2cap_ertm_init(chan); + if (chan->mode == L2CAP_MODE_ERTM || + chan->mode == L2CAP_MODE_STREAMING) + err = l2cap_ertm_init(chan); - l2cap_chan_ready(chan); + if (err < 0) + l2cap_send_disconn_req(chan->conn, chan, -err); + else + l2cap_chan_ready(chan); } done: l2cap_chan_unlock(chan); - return 0; + return err; } static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) @@ -3092,11 +3426,13 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd sk->sk_shutdown = SHUTDOWN_MASK; release_sock(sk); + l2cap_chan_hold(chan); l2cap_chan_del(chan, ECONNRESET); l2cap_chan_unlock(chan); chan->ops->close(chan->data); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -3124,11 +3460,13 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd l2cap_chan_lock(chan); + l2cap_chan_hold(chan); l2cap_chan_del(chan, 0); l2cap_chan_unlock(chan); chan->ops->close(chan->data); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -3265,8 +3603,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn, /* Placeholder: Always reject */ rsp.dcid = 0; rsp.scid = cpu_to_le16(scid); - rsp.result = L2CAP_CR_NO_MEM; - rsp.status = L2CAP_CS_NO_INFO; + rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM); + rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, sizeof(rsp), &rsp); @@ -3665,19 +4003,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, struct sk_buff *next_skb; int tx_seq_offset, next_tx_seq_offset; - bt_cb(skb)->tx_seq = tx_seq; - bt_cb(skb)->sar = sar; + bt_cb(skb)->control.txseq = tx_seq; + 
bt_cb(skb)->control.sar = sar; next_skb = skb_peek(&chan->srej_q); tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); while (next_skb) { - if (bt_cb(next_skb)->tx_seq == tx_seq) + if (bt_cb(next_skb)->control.txseq == tx_seq) return -EINVAL; next_tx_seq_offset = __seq_offset(chan, - bt_cb(next_skb)->tx_seq, chan->buffer_seq); + bt_cb(next_skb)->control.txseq, chan->buffer_seq); if (next_tx_seq_offset > tx_seq_offset) { __skb_queue_before(&chan->srej_q, next_skb, skb); @@ -3800,6 +4138,7 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) BT_DBG("chan %p, Enter local busy", chan); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + l2cap_seq_list_clear(&chan->srej_list); __set_ack_timer(chan); } @@ -3848,11 +4187,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { int err; - if (bt_cb(skb)->tx_seq != tx_seq) + if (bt_cb(skb)->control.txseq != tx_seq) break; skb = skb_dequeue(&chan->srej_q); - control = __set_ctrl_sar(chan, bt_cb(skb)->sar); + control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar); err = l2cap_reassemble_sdu(chan, skb, control); if (err < 0) { @@ -3892,6 +4231,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) while (tx_seq != chan->expected_tx_seq) { control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); control |= __set_reqseq(chan, chan->expected_tx_seq); + l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq); l2cap_send_sframe(chan, control); new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); @@ -4022,8 +4362,8 @@ expected: chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { - bt_cb(skb)->tx_seq = tx_seq; - bt_cb(skb)->sar = sar; + bt_cb(skb)->control.txseq = tx_seq; + bt_cb(skb)->control.sar = sar; __skb_queue_tail(&chan->srej_q, skb); return 0; } @@ -4220,6 +4560,8 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) u16 req_seq; int len, next_tx_seq_offset, req_seq_offset; + __unpack_control(chan, skb); + control = __get_control(chan, skb->data); skb_pull(skb, __ctrl_size(chan)); len = skb->len; @@ -4295,8 +4637,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk return 0; } - l2cap_chan_lock(chan); - BT_DBG("chan %p, len %d", chan, skb->len); if (chan->state != BT_CONNECTED) @@ -4375,7 +4715,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str { struct l2cap_chan *chan; - chan = l2cap_global_chan_by_psm(0, psm, conn->src); + chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst); if (!chan) goto drop; @@ -4396,11 +4736,12 @@ drop: return 0; } -static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) +static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, + struct sk_buff *skb) { struct l2cap_chan *chan; - chan = l2cap_global_chan_by_scid(0, cid, conn->src); + chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst); if (!chan) goto drop; @@ -4445,7 +4786,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) break; case L2CAP_CID_CONN_LESS: - psm = get_unaligned_le16(skb->data); + psm = get_unaligned((__le16 *) skb->data); skb_pull(skb, 2); l2cap_conless_channel(conn, psm, skb); break; @@ -4540,7 +4881,6 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) if (encrypt == 0x00) { if (chan->sec_level == BT_SECURITY_MEDIUM) { - __clear_chan_timer(chan); 
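
In l2cap_send_srejframe() above, a receiver that sees an I-frame beyond expected_tx_seq issues one SREJ per missing sequence number, appending each to srej_list, until the expected counter catches up with the received one. A toy version of that catch-up loop, assuming a 64-value sequence space for brevity:

#include <stdio.h>

#define SEQ_MOD 64      /* illustrative sequence space */

/* SREJ every missing number between 'expected' and 'received',
 * wrapping modulo the sequence space, as the kernel loop does. */
static int srej_gap(int expected, int received)
{
        while (received != expected) {
                printf("send SREJ for txseq %d\n", expected);
                expected = (expected + 1) % SEQ_MOD;
        }
        return expected;        /* caller stores new expected_tx_seq */
}

int main(void)
{
        /* expected 62, received 1: SREJs 62, 63, 0 (wraps around) */
        srej_gap(62, 1);
        return 0;
}
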
__set_chan_timer(chan, L2CAP_ENC_TIMEOUT); } else if (chan->sec_level == BT_SECURITY_HIGH) l2cap_chan_close(chan, ECONNREFUSED); @@ -4561,7 +4901,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) BT_DBG("conn %p", conn); if (hcon->type == LE_LINK) { - smp_distribute_keys(conn, 0); + if (!status && encrypt) + smp_distribute_keys(conn, 0); cancel_delayed_work(&conn->security_timer); } @@ -4591,7 +4932,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) chan->state == BT_CONFIG)) { struct sock *sk = chan->sk; - bt_sk(sk)->suspended = false; + clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); sk->sk_state_change(sk); l2cap_check_encryption(chan, encrypt); @@ -4603,7 +4944,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) if (!status) { l2cap_send_conn_req(chan); } else { - __clear_chan_timer(chan); __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } } else if (chan->state == BT_CONNECT2) { @@ -4614,7 +4954,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) lock_sock(sk); if (!status) { - if (bt_sk(sk)->defer_setup) { + if (test_bit(BT_SK_DEFER_SETUP, + &bt_sk(sk)->flags)) { struct sock *parent = bt_sk(sk)->parent; res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; @@ -4664,8 +5005,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) if (!(flags & ACL_CONT)) { struct l2cap_hdr *hdr; - struct l2cap_chan *chan; - u16 cid; int len; if (conn->rx_len) { @@ -4685,7 +5024,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) hdr = (struct l2cap_hdr *) skb->data; len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; - cid = __le16_to_cpu(hdr->cid); if (len == skb->len) { /* Complete frame received */ @@ -4702,23 +5040,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) goto drop; } - chan = l2cap_get_chan_by_scid(conn, cid); - - if (chan && chan->sk) { - struct sock *sk = chan->sk; - lock_sock(sk); - - if (chan->imtu < len - L2CAP_HDR_SIZE) { - BT_ERR("Frame exceeding recv MTU (len %d, " - "MTU %d)", len, - chan->imtu); - release_sock(sk); - l2cap_conn_unreliable(conn, ECOMM); - goto drop; - } - release_sock(sk); - } - /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); if (!conn->rx_skb) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 04e7c172d49..3bb1611b9d4 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al return -EINVAL; err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), - &la.l2_bdaddr); + &la.l2_bdaddr, la.l2_bdaddr_type); if (err) return err; @@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) lock_sock(sk); - if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) - || sk->sk_state != BT_BOUND) { + if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } + if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + switch (chan->mode) { case L2CAP_MODE_BASIC: break; @@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us case L2CAP_CONNINFO: if (sk->sk_state != BT_CONNECTED && - !(sk->sk_state == BT_CONNECT2 && - bt_sk(sk)->defer_setup)) { + !(sk->sk_state == BT_CONNECT2 && + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } @@ -375,7 +379,10 @@ static int 
l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch } memset(&sec, 0, sizeof(sec)); - sec.level = chan->sec_level; + if (chan->conn) + sec.level = chan->conn->hcon->sec_level; + else + sec.level = chan->sec_level; if (sk->sk_state == BT_CONNECTED) sec.key_size = chan->conn->hcon->enc_key_size; @@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; } - if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *) optval)) err = -EFAULT; break; @@ -594,10 +602,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch /* or for ACL link */ } else if ((sk->sk_state == BT_CONNECT2 && - bt_sk(sk)->defer_setup) || + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || sk->sk_state == BT_CONNECTED) { if (!l2cap_chan_check_security(chan)) - bt_sk(sk)->suspended = true; + set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); else sk->sk_state_change(sk); } else { @@ -616,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch break; } - bt_sk(sk)->defer_setup = opt; + if (opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case BT_FLUSHABLE: @@ -716,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; - lock_sock(sk); - - if (sk->sk_state != BT_CONNECTED) { - release_sock(sk); + if (sk->sk_state != BT_CONNECTED) return -ENOTCONN; - } + l2cap_chan_lock(chan); err = l2cap_chan_send(chan, msg, len, sk->sk_priority); + l2cap_chan_unlock(chan); - release_sock(sk); return err; } @@ -737,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms lock_sock(sk); - if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { + if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, + &bt_sk(sk)->flags)) { sk->sk_state = BT_CONFIG; pi->chan->state = BT_CONFIG; @@ -931,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state) } static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, - unsigned long len, int nb, - int *err) + unsigned long len, int nb) { - struct sock *sk = chan->sk; + struct sk_buff *skb; + int err; + + l2cap_chan_unlock(chan); + skb = bt_skb_send_alloc(chan->sk, len, nb, &err); + l2cap_chan_lock(chan); + + if (!skb) + return ERR_PTR(err); - return bt_skb_send_alloc(sk, len, nb, err); + return skb; } static struct l2cap_ops l2cap_chan_ops = { @@ -952,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); + l2cap_chan_put(l2cap_pi(sk)->chan); if (l2cap_pi(sk)->rx_busy_skb) { kfree_skb(l2cap_pi(sk)->rx_busy_skb); l2cap_pi(sk)->rx_busy_skb = NULL; @@ -972,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) struct l2cap_chan *pchan = l2cap_pi(parent)->chan; sk->sk_type = parent->sk_type; - bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; + bt_sk(sk)->flags = bt_sk(parent)->flags; chan->chan_type = pchan->chan_type; chan->imtu = pchan->imtu; @@ -1010,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) } else { chan->mode = L2CAP_MODE_BASIC; } - chan->max_tx = L2CAP_DEFAULT_MAX_TX; - chan->fcs = L2CAP_FCS_CRC16; - chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; - chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; - chan->sec_level = BT_SECURITY_LOW; - chan->flags = 0; - set_bit(FLAG_FORCE_ACTIVE, 
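[Annotation] The reworked alloc_skb callback above drops the channel lock across a blocking allocation and reports failure through the returned pointer itself instead of an int out-parameter. A sketch of the ERR_PTR()/IS_ERR() encoding it relies on; the -4095 bound mirrors the kernel's MAX_ERRNO:

static inline void *ERR_PTR(long error)
{
        return (void *) error;          /* fold -errno into a pointer */
}

static inline long PTR_ERR(const void *ptr)
{
        return (long) ptr;
}

static inline int IS_ERR(const void *ptr)
{
        /* the top 4095 addresses are reserved for error codes */
        return (unsigned long) ptr >= (unsigned long) -4095;
}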
&chan->flags); + + l2cap_chan_set_defaults(chan); } /* Default config options */ @@ -1052,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p sk->sk_protocol = proto; sk->sk_state = BT_OPEN; - chan = l2cap_chan_create(sk); + chan = l2cap_chan_create(); if (!chan) { l2cap_sock_kill(sk); return NULL; } + l2cap_chan_hold(chan); + + chan->sk = sk; + l2cap_pi(sk)->chan = chan; return sk; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 4bb03b11112..3e5e3362ea0 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -35,10 +35,9 @@ #include <net/bluetooth/smp.h> bool enable_hs; -bool enable_le; #define MGMT_VERSION 1 -#define MGMT_REVISION 0 +#define MGMT_REVISION 1 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_CONFIRM_NAME, MGMT_OP_BLOCK_DEVICE, MGMT_OP_UNBLOCK_DEVICE, + MGMT_OP_SET_DEVICE_ID, }; static const u16 mgmt_events[] = { @@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) ev = (void *) skb_put(skb, sizeof(*ev)); ev->status = status; - put_unaligned_le16(cmd, &ev->opcode); + ev->opcode = cpu_to_le16(cmd); err = sock_queue_rcv_skb(sk, skb); if (err < 0) @@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); - put_unaligned_le16(cmd, &ev->opcode); + ev->opcode = cpu_to_le16(cmd); ev->status = status; if (rp) @@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("sock %p", sk); rp.version = MGMT_VERSION; - put_unaligned_le16(MGMT_REVISION, &rp.revision); + rp.revision = __constant_cpu_to_le16(MGMT_REVISION); return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp, sizeof(rp)); @@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_commands *rp; - u16 num_commands = ARRAY_SIZE(mgmt_commands); - u16 num_events = ARRAY_SIZE(mgmt_events); - u16 *opcode; + const u16 num_commands = ARRAY_SIZE(mgmt_commands); + const u16 num_events = ARRAY_SIZE(mgmt_events); + __le16 *opcode; size_t rp_size; int i, err; @@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, if (!rp) return -ENOMEM; - put_unaligned_le16(num_commands, &rp->num_commands); - put_unaligned_le16(num_events, &rp->num_events); + rp->num_commands = __constant_cpu_to_le16(num_commands); + rp->num_events = __constant_cpu_to_le16(num_events); for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++) put_unaligned_le16(mgmt_commands[i], opcode); @@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, return -ENOMEM; } - put_unaligned_le16(count, &rp->num_controllers); + rp->num_controllers = cpu_to_le16(count); i = 0; list_for_each_entry(d, &hci_dev_list, list) { if (test_bit(HCI_SETUP, &d->dev_flags)) continue; - put_unaligned_le16(d->id, &rp->index[i++]); + rp->index[i++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } @@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (enable_hs) settings |= MGMT_SETTING_HS; - if (enable_le) { - if (hdev->features[4] & LMP_LE) - settings |= MGMT_SETTING_LE; - } + if (hdev->features[4] & LMP_LE) + settings |= MGMT_SETTING_LE; return settings; } @@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128) return 0; } - memcpy(&val, 
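[Annotation] A pattern repeated through the mgmt.c hunks that follow: once a wire-format field is declared __le16 and naturally aligned inside its struct, put_unaligned_le16(v, &field) collapses to field = cpu_to_le16(v); the unaligned accessor survives only where the destination is a raw byte buffer (the opcode list in read_commands, EIR data). Userspace stand-ins for the two idioms, assuming a little-endian host -- the kernel macro byte-swaps on big-endian builds:

#include <stdint.h>
#include <string.h>

static uint16_t cpu_to_le16_s(uint16_t v)
{
        return v;       /* identity on little-endian; swap elsewhere */
}

static void put_unaligned_le16_s(uint16_t v, void *p)
{
        /* byte-wise store, valid at any alignment */
        uint8_t b[2] = { (uint8_t) v, (uint8_t) (v >> 8) };
        memcpy(p, b, sizeof(b));
}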
&uuid128[12], 4); - - val = le32_to_cpu(val); + val = get_unaligned_le32(&uuid128[12]); if (val > 0xffff) return 0; @@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data) ptr += (name_len + 2); } + if (hdev->inq_tx_power) { + ptr[0] = 2; + ptr[1] = EIR_TX_POWER; + ptr[2] = (u8) hdev->inq_tx_power; + + eir_len += 3; + ptr += 3; + } + + if (hdev->devid_source > 0) { + ptr[0] = 9; + ptr[1] = EIR_DEVICE_ID; + + put_unaligned_le16(hdev->devid_source, ptr + 2); + put_unaligned_le16(hdev->devid_vendor, ptr + 4); + put_unaligned_le16(hdev->devid_product, ptr + 6); + put_unaligned_le16(hdev->devid_version, ptr + 8); + + eir_len += 10; + ptr += 10; + } + memset(uuid16_list, 0, sizeof(uuid16_list)); /* Group all UUID16 types */ @@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev, bacpy(&rp.bdaddr, &hdev->bdaddr); rp.version = hdev->hci_ver; - - put_unaligned_le16(hdev->manufacturer, &rp.manufacturer); + rp.manufacturer = cpu_to_le16(hdev->manufacturer); rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); rp.current_settings = cpu_to_le32(get_current_settings(hdev)); @@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("request for %s", hdev->name); - timeout = get_unaligned_le16(&cp->timeout); + timeout = __le16_to_cpu(cp->timeout); if (!cp->val && timeout > 0) return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_INVALID_PARAMS); @@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, - MGMT_STATUS_BUSY); + err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_BUSY); goto failed; } @@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_dev_lock(hdev); - if (!enable_le || !(hdev->features[4] & LMP_LE)) { + if (!(hdev->features[4] & LMP_LE)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_NOT_SUPPORTED); goto unlock; @@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), &hci_cp); - if (err < 0) { + if (err < 0) mgmt_pending_remove(cmd); - goto unlock; - } unlock: hci_dev_unlock(hdev); @@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto failed; - } failed: hci_dev_unlock(hdev); @@ -1368,10 +1381,8 @@ update_class: } cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } unlock: hci_dev_unlock(hdev); @@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, } cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } unlock: hci_dev_unlock(hdev); @@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 key_count, expected_len; int i; - key_count = get_unaligned_le16(&cp->key_count); + key_count = __le16_to_cpu(cp->key_count); expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_link_key_info); @@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (cp->addr.type == MGMT_ADDR_BREDR) 
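[Annotation] The create_eir() addition above emits a fixed 10-byte Device ID record: a length octet of 9 (the type octet plus eight payload bytes), the EIR type, then four little-endian words. A sketch with put_le16 as a local helper; the EIR_DEVICE_ID value of 0x10 is an assumption here:

#include <stdint.h>
#include <stddef.h>

#define EIR_DEVICE_ID 0x10      /* assumed value for this sketch */

static void put_le16(uint16_t v, uint8_t *p)
{
        p[0] = (uint8_t) v;
        p[1] = (uint8_t) (v >> 8);
}

/* Returns bytes consumed so the caller can advance ptr and eir_len. */
static size_t eir_append_device_id(uint8_t *ptr, uint16_t source,
                                   uint16_t vendor, uint16_t product,
                                   uint16_t version)
{
        ptr[0] = 9;             /* length: type octet + 8 data bytes */
        ptr[1] = EIR_DEVICE_ID;
        put_le16(source,  ptr + 2);
        put_le16(vendor,  ptr + 4);
        put_le16(product, ptr + 6);
        put_le16(version, ptr + 8);
        return 10;
}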
+ if (cp->addr.type == BDADDR_BREDR) err = hci_remove_link_key(hdev, &cp->addr.bdaddr); else err = hci_remove_ltk(hdev, &cp->addr.bdaddr); @@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, } if (cp->disconnect) { - if (cp->addr.type == MGMT_ADDR_BREDR) + if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else @@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - put_unaligned_le16(conn->handle, &dc.handle); + dc.handle = cpu_to_le16(conn->handle); dc.reason = 0x13; /* Remote User Terminated Connection */ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); if (err < 0) @@ -1584,12 +1593,12 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - if (cp->addr.type == MGMT_ADDR_BREDR) + if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); - if (!conn) { + if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_NOT_CONNECTED); goto failed; @@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - put_unaligned_le16(conn->handle, &dc.handle); + dc.handle = cpu_to_le16(conn->handle); dc.reason = 0x13; /* Remote User Terminated Connection */ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); @@ -1613,22 +1622,22 @@ failed: return err; } -static u8 link_to_mgmt(u8 link_type, u8 addr_type) +static u8 link_to_bdaddr(u8 link_type, u8 addr_type) { switch (link_type) { case LE_LINK: switch (addr_type) { case ADDR_LE_DEV_PUBLIC: - return MGMT_ADDR_LE_PUBLIC; - case ADDR_LE_DEV_RANDOM: - return MGMT_ADDR_LE_RANDOM; + return BDADDR_LE_PUBLIC; + default: - return MGMT_ADDR_INVALID; + /* Fallback to LE Random address type */ + return BDADDR_LE_RANDOM; } - case ACL_LINK: - return MGMT_ADDR_BREDR; + default: - return MGMT_ADDR_INVALID; + /* Fallback to BR/EDR type */ + return BDADDR_BREDR; } } @@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) continue; bacpy(&rp->addr[i].bdaddr, &c->dst); - rp->addr[i].type = link_to_mgmt(c->type, c->dst_type); - if (rp->addr[i].type == MGMT_ADDR_INVALID) + rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type); + if (c->type == SCO_LINK || c->type == ESCO_LINK) continue; i++; } - put_unaligned_le16(i, &rp->conn_count); + rp->conn_count = cpu_to_le16(i); /* Recalculate length in case of filtered SCO connections, etc */ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); @@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) struct hci_conn *conn = cmd->user_data; bacpy(&rp.addr.bdaddr, &conn->dst); - rp.addr.type = link_to_mgmt(conn->type, conn->dst_type); + rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); @@ -1864,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) pairing_complete(cmd, mgmt_status(status)); } +static void le_connect_complete_cb(struct hci_conn *conn, u8 status) +{ + struct pending_cmd *cmd; + + BT_DBG("status %u", status); + + if (!status) + return; + + cmd = find_pairing(conn); + if (!cmd) + BT_DBG("Unable to find a pending command"); + else + 
pairing_complete(cmd, mgmt_status(status)); +} + static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1890,12 +1915,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, else auth_type = HCI_AT_DEDICATED_BONDING_MITM; - if (cp->addr.type == MGMT_ADDR_BREDR) - conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level, - auth_type); + if (cp->addr.type == BDADDR_BREDR) + conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, + cp->addr.type, sec_level, auth_type); else - conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level, - auth_type); + conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, + cp->addr.type, sec_level, auth_type); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); @@ -1923,8 +1948,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, } /* For LE, just connecting isn't a proof that the pairing finished */ - if (cp->addr.type == MGMT_ADDR_BREDR) + if (cp->addr.type == BDADDR_BREDR) conn->connect_cfm_cb = pairing_complete_cb; + else + conn->connect_cfm_cb = le_connect_complete_cb; conn->security_cfm_cb = pairing_complete_cb; conn->disconn_cfm_cb = pairing_complete_cb; @@ -2000,7 +2027,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, goto done; } - if (type == MGMT_ADDR_BREDR) + if (type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); @@ -2011,7 +2038,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, goto done; } - if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) { + if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) { /* Continue with pairing via SMP */ err = smp_user_confirm_reply(conn, mgmt_op, passkey); @@ -2295,6 +2322,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, goto failed; } + if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { + err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, + MGMT_STATUS_BUSY); + goto failed; + } + if (hdev->discovery.state != DISCOVERY_STOPPED) { err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, MGMT_STATUS_BUSY); @@ -2381,27 +2414,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (hdev->discovery.state == DISCOVERY_FINDING) { - err = hci_cancel_inquiry(hdev); - if (err < 0) - mgmt_pending_remove(cmd); + switch (hdev->discovery.state) { + case DISCOVERY_FINDING: + if (test_bit(HCI_INQUIRY, &hdev->flags)) + err = hci_cancel_inquiry(hdev); else - hci_discovery_set_state(hdev, DISCOVERY_STOPPING); - goto unlock; - } + err = hci_cancel_le_scan(hdev); - e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING); - if (!e) { - mgmt_pending_remove(cmd); - err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, - &mgmt_cp->type, sizeof(mgmt_cp->type)); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - goto unlock; + break; + + case DISCOVERY_RESOLVING: + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, + NAME_PENDING); + if (!e) { + mgmt_pending_remove(cmd); + err = cmd_complete(sk, hdev->id, + MGMT_OP_STOP_DISCOVERY, 0, + &mgmt_cp->type, + sizeof(mgmt_cp->type)); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + goto unlock; + } + + bacpy(&cp.bdaddr, &e->data.bdaddr); + err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, + sizeof(cp), &cp); + + break; + + default: + BT_DBG("unknown discovery state %u", hdev->discovery.state); + err = -EFAULT; } - bacpy(&cp.bdaddr, 
&e->data.bdaddr); - err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), - &cp); if (err < 0) mgmt_pending_remove(cmd); else @@ -2501,6 +2546,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, return err; } +static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_device_id *cp = data; + int err; + __u16 source; + + BT_DBG("%s", hdev->name); + + source = __le16_to_cpu(cp->source); + + if (source > 0x0002) + return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + hdev->devid_source = source; + hdev->devid_vendor = __le16_to_cpu(cp->vendor); + hdev->devid_product = __le16_to_cpu(cp->product); + hdev->devid_version = __le16_to_cpu(cp->version); + + err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); + + update_eir(hdev); + + hci_dev_unlock(hdev); + + return err; +} + static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -2565,7 +2641,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, u16 key_count, expected_len; int i; - key_count = get_unaligned_le16(&cp->key_count); + key_count = __le16_to_cpu(cp->key_count); expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_ltk_info); @@ -2591,7 +2667,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, else type = HCI_SMP_LTK_SLAVE; - hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type, + hci_add_ltk(hdev, &key->addr.bdaddr, + bdaddr_to_le(key->addr.type), type, 0, key->authenticated, key->val, key->enc_size, key->ediv, key->rand); } @@ -2601,7 +2678,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, return 0; } -struct mgmt_handler { +static const struct mgmt_handler { int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len); bool var_len; @@ -2647,6 +2724,7 @@ struct mgmt_handler { { confirm_name, false, MGMT_CONFIRM_NAME_SIZE }, { block_device, false, MGMT_BLOCK_DEVICE_SIZE }, { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE }, + { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE }, }; @@ -2657,7 +2735,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) struct mgmt_hdr *hdr; u16 opcode, index, len; struct hci_dev *hdev = NULL; - struct mgmt_handler *handler; + const struct mgmt_handler *handler; int err; BT_DBG("got %zu bytes", msglen); @@ -2675,9 +2753,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) } hdr = buf; - opcode = get_unaligned_le16(&hdr->opcode); - index = get_unaligned_le16(&hdr->index); - len = get_unaligned_le16(&hdr->len); + opcode = __le16_to_cpu(hdr->opcode); + index = __le16_to_cpu(hdr->index); + len = __le16_to_cpu(hdr->len); if (len != msglen - sizeof(*hdr)) { err = -EINVAL; @@ -2884,7 +2962,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) return 0; } -int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent) +int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, + bool persistent) { struct mgmt_ev_new_link_key ev; @@ -2892,7 +2971,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); - ev.key.addr.type = MGMT_ADDR_BREDR; + ev.key.addr.type = BDADDR_BREDR; ev.key.type = key->type; memcpy(ev.key.val, key->val, 16); ev.key.pin_len = key->pin_len; @@ -2908,7 +2987,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct 
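[Annotation] load_link_keys() and load_long_term_keys() (below) guard variable-length commands the same way: convert key_count from little-endian, compute header-plus-elements, and require an exact match against the received length. A generic sketch of the check, with hypothetical struct names:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct key_info {
        uint8_t addr[7];
        uint8_t val[16];
};

struct load_keys_cp {
        uint16_t key_count;             /* __le16 on the wire */
        struct key_info keys[];         /* flexible array member */
};

static int check_load_keys_len(size_t len, uint16_t key_count)
{
        size_t expected = sizeof(struct load_keys_cp) +
                          (size_t) key_count * sizeof(struct key_info);

        return len == expected ? 0 : -EINVAL;
}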
smp_ltk *key, u8 persistent) ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); - ev.key.addr.type = key->bdaddr_type; + ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); ev.key.authenticated = key->authenticated; ev.key.enc_size = key->enc_size; ev.key.ediv = key->ediv; @@ -2932,7 +3011,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u16 eir_len = 0; bacpy(&ev->addr.bdaddr, bdaddr); - ev->addr.type = link_to_mgmt(link_type, addr_type); + ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->flags = __cpu_to_le32(flags); @@ -2944,7 +3023,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, dev_class, 3); - put_unaligned_le16(eir_len, &ev->eir_len); + ev->eir_len = cpu_to_le16(eir_len); return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, sizeof(*ev) + eir_len, NULL); @@ -2995,13 +3074,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); bacpy(&ev.bdaddr, bdaddr); - ev.type = link_to_mgmt(link_type, addr_type); + ev.type = link_to_bdaddr(link_type, addr_type); err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk); if (sk) - sock_put(sk); + sock_put(sk); mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, hdev); @@ -3021,7 +3100,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, return -ENOENT; bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = link_to_mgmt(link_type, addr_type); + rp.addr.type = link_to_bdaddr(link_type, addr_type); err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, mgmt_status(status), &rp, sizeof(rp)); @@ -3039,7 +3118,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, struct mgmt_ev_connect_failed ev; bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_mgmt(link_type, addr_type); + ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.status = mgmt_status(status); return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); @@ -3050,7 +3129,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) struct mgmt_ev_pin_code_request ev; bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = MGMT_ADDR_BREDR; + ev.addr.type = BDADDR_BREDR; ev.secure = secure; return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), @@ -3069,7 +3148,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, return -ENOENT; bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = MGMT_ADDR_BREDR; + rp.addr.type = BDADDR_BREDR; err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, mgmt_status(status), &rp, sizeof(rp)); @@ -3091,7 +3170,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, return -ENOENT; bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = MGMT_ADDR_BREDR; + rp.addr.type = BDADDR_BREDR; err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, mgmt_status(status), &rp, sizeof(rp)); @@ -3110,9 +3189,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, BT_DBG("%s", hdev->name); bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_mgmt(link_type, addr_type); + ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.confirm_hint = confirm_hint; - put_unaligned_le32(value, &ev.value); + ev.value = value; return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), NULL); @@ -3126,7 +3205,7 @@ int 
mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, BT_DBG("%s", hdev->name); bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_mgmt(link_type, addr_type); + ev.addr.type = link_to_bdaddr(link_type, addr_type); return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), NULL); @@ -3145,7 +3224,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, return -ENOENT; bacpy(&rp.addr.bdaddr, bdaddr); - rp.addr.type = link_to_mgmt(link_type, addr_type); + rp.addr.type = link_to_bdaddr(link_type, addr_type); err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status), &rp, sizeof(rp)); @@ -3188,7 +3267,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, struct mgmt_ev_auth_failed ev; bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_mgmt(link_type, addr_type); + ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.status = mgmt_status(status); return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); @@ -3413,10 +3492,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) if (enable && test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags)) - err = new_settings(hdev, NULL); + err = new_settings(hdev, NULL); - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, - cmd_status_rsp, &mgmt_err); + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, + &mgmt_err); return err; } @@ -3455,7 +3534,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, memset(buf, 0, sizeof(buf)); bacpy(&ev->addr.bdaddr, bdaddr); - ev->addr.type = link_to_mgmt(link_type, addr_type); + ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; if (cfm_name) ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME; @@ -3469,7 +3548,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, dev_class, 3); - put_unaligned_le16(eir_len, &ev->eir_len); + ev->eir_len = cpu_to_le16(eir_len); ev_size = sizeof(*ev) + eir_len; @@ -3488,13 +3567,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, memset(buf, 0, sizeof(buf)); bacpy(&ev->addr.bdaddr, bdaddr); - ev->addr.type = link_to_mgmt(link_type, addr_type); + ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, name_len); - put_unaligned_le16(eir_len, &ev->eir_len); + ev->eir_len = cpu_to_le16(eir_len); return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL); @@ -3594,6 +3673,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) module_param(enable_hs, bool, 0644); MODULE_PARM_DESC(enable_hs, "Enable High Speed support"); - -module_param(enable_le, bool, 0644); -MODULE_PARM_DESC(enable_le, "Enable Low Energy support"); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index a55a43e9f70..e8707debb86 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent) if (parent) { sk->sk_type = parent->sk_type; - pi->dlc->defer_setup = bt_sk(parent)->defer_setup; + pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP, + &bt_sk(parent)->flags); pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; @@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c break; } - bt_sk(sk)->defer_setup = opt; + if 
(opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + break; default: @@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c break; } - if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *) optval)) err = -EFAULT; break; @@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc * done: bh_unlock_sock(parent); - if (bt_sk(parent)->defer_setup) + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) parent->sk_state_change(parent); return result; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 4bf54b37725..d1820ff14ae 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -48,13 +48,12 @@ static struct tty_driver *rfcomm_tty_driver; struct rfcomm_dev { + struct tty_port port; struct list_head list; - atomic_t refcnt; char name[12]; int id; unsigned long flags; - atomic_t opened; int err; bdaddr_t src; @@ -64,9 +63,7 @@ struct rfcomm_dev { uint modem_status; struct rfcomm_dlc *dlc; - struct tty_struct *tty; wait_queue_head_t wait; - struct work_struct wakeup_task; struct device *tty_dev; @@ -82,11 +79,18 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); -static void rfcomm_tty_wakeup(struct work_struct *work); - /* ---- Device functions ---- */ -static void rfcomm_dev_destruct(struct rfcomm_dev *dev) + +/* + * The reason this isn't actually a race, as you no doubt have a little voice + * screaming at you in your head, is that the refcount should never actually + * reach zero unless the device has already been taken off the list, in + * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in + * rfcomm_dev_destruct() anyway. + */ +static void rfcomm_dev_destruct(struct tty_port *port) { + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); struct rfcomm_dlc *dlc = dev->dlc; BT_DBG("dev %p dlc %p", dev, dlc); @@ -113,23 +117,9 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev) module_put(THIS_MODULE); } -static inline void rfcomm_dev_hold(struct rfcomm_dev *dev) -{ - atomic_inc(&dev->refcnt); -} - -static inline void rfcomm_dev_put(struct rfcomm_dev *dev) -{ - /* The reason this isn't actually a race, as you no - doubt have a little voice screaming at you in your - head, is that the refcount should never actually - reach zero unless the device has already been taken - off the list, in rfcomm_dev_del(). And if that's not - true, we'll hit the BUG() in rfcomm_dev_destruct() - anyway. 
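[Annotation] The rfcomm/tty.c conversion here swaps a hand-rolled atomic_t refcount for the embedded struct tty_port, whose tty_port_get()/tty_port_put() pair invokes the .destruct operation on the final put (the kernel recovers the containing rfcomm_dev with container_of()). A compact userspace model of that lifetime pattern; names are stand-ins:

#include <stdatomic.h>

struct port {
        atomic_int kref;
        void (*destruct)(struct port *);
};

static void port_get(struct port *p)
{
        atomic_fetch_add(&p->kref, 1);
}

static void port_put(struct port *p)
{
        /* Safe only because the last reference cannot drop until the
         * device has left the global list -- the invariant the comment
         * in the hunk above is defending. */
        if (atomic_fetch_sub(&p->kref, 1) == 1)
                p->destruct(p);
}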
*/ - if (atomic_dec_and_test(&dev->refcnt)) - rfcomm_dev_destruct(dev); -} +static const struct tty_port_operations rfcomm_port_ops = { + .destruct = rfcomm_dev_destruct, +}; static struct rfcomm_dev *__rfcomm_dev_get(int id) { @@ -154,7 +144,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id) if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) dev = NULL; else - rfcomm_dev_hold(dev); + tty_port_get(&dev->port); } spin_unlock(&rfcomm_dev_lock); @@ -241,7 +231,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) sprintf(dev->name, "rfcomm%d", dev->id); list_add(&dev->list, head); - atomic_set(&dev->refcnt, 1); bacpy(&dev->src, &req->src); bacpy(&dev->dst, &req->dst); @@ -250,10 +239,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) dev->flags = req->flags & ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); - atomic_set(&dev->opened, 0); - + tty_port_init(&dev->port); + dev->port.ops = &rfcomm_port_ops; init_waitqueue_head(&dev->wait); - INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup); skb_queue_head_init(&dev->pending); @@ -320,18 +308,23 @@ free: static void rfcomm_dev_del(struct rfcomm_dev *dev) { + unsigned long flags; BT_DBG("dev %p", dev); BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); - if (atomic_read(&dev->opened) > 0) + spin_lock_irqsave(&dev->port.lock, flags); + if (dev->port.count > 0) { + spin_unlock_irqrestore(&dev->port.lock, flags); return; + } + spin_unlock_irqrestore(&dev->port.lock, flags); spin_lock(&rfcomm_dev_lock); list_del_init(&dev->list); spin_unlock(&rfcomm_dev_lock); - rfcomm_dev_put(dev); + tty_port_put(&dev->port); } /* ---- Send buffer ---- */ @@ -345,15 +338,16 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc) static void rfcomm_wfree(struct sk_buff *skb) { struct rfcomm_dev *dev = (void *) skb->sk; + struct tty_struct *tty = dev->port.tty; atomic_sub(skb->truesize, &dev->wmem_alloc); - if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) - queue_work(system_nrt_wq, &dev->wakeup_task); - rfcomm_dev_put(dev); + if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty) + tty_wakeup(tty); + tty_port_put(&dev->port); } static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) { - rfcomm_dev_hold(dev); + tty_port_get(&dev->port); atomic_add(skb->truesize, &dev->wmem_alloc); skb->sk = (void *) dev; skb->destructor = rfcomm_wfree; @@ -432,7 +426,7 @@ static int rfcomm_release_dev(void __user *arg) return -ENODEV; if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { - rfcomm_dev_put(dev); + tty_port_put(&dev->port); return -EPERM; } @@ -440,12 +434,12 @@ static int rfcomm_release_dev(void __user *arg) rfcomm_dlc_close(dev->dlc, 0); /* Shut down TTY synchronously before freeing rfcomm_dev */ - if (dev->tty) - tty_vhangup(dev->tty); + if (dev->port.tty) + tty_vhangup(dev->port.tty); if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) rfcomm_dev_del(dev); - rfcomm_dev_put(dev); + tty_port_put(&dev->port); return 0; } @@ -523,7 +517,7 @@ static int rfcomm_get_dev_info(void __user *arg) if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; - rfcomm_dev_put(dev); + tty_port_put(&dev->port); return err; } @@ -559,7 +553,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) return; } - tty = dev->tty; + tty = dev->port.tty; if (!tty || !skb_queue_empty(&dev->pending)) { skb_queue_tail(&dev->pending, skb); return; @@ -585,13 +579,13 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) 
wake_up_interruptible(&dev->wait); if (dlc->state == BT_CLOSED) { - if (!dev->tty) { + if (!dev->port.tty) { if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { /* Drop DLC lock here to avoid deadlock * 1. rfcomm_dev_get will take rfcomm_dev_lock * but in rfcomm_dev_add there's lock order: * rfcomm_dev_lock -> dlc lock - * 2. rfcomm_dev_put will deadlock if it's + * 2. tty_port_put will deadlock if it's * the last reference */ rfcomm_dlc_unlock(dlc); @@ -601,11 +595,11 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) } rfcomm_dev_del(dev); - rfcomm_dev_put(dev); + tty_port_put(&dev->port); rfcomm_dlc_lock(dlc); } } else - tty_hangup(dev->tty); + tty_hangup(dev->port.tty); } } @@ -618,8 +612,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) { - if (dev->tty && !C_CLOCAL(dev->tty)) - tty_hangup(dev->tty); + if (dev->port.tty && !C_CLOCAL(dev->port.tty)) + tty_hangup(dev->port.tty); } dev->modem_status = @@ -630,21 +624,9 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) } /* ---- TTY functions ---- */ -static void rfcomm_tty_wakeup(struct work_struct *work) -{ - struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev, - wakeup_task); - struct tty_struct *tty = dev->tty; - if (!tty) - return; - - BT_DBG("dev %p tty %p", dev, tty); - tty_wakeup(tty); -} - static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) { - struct tty_struct *tty = dev->tty; + struct tty_struct *tty = dev->port.tty; struct sk_buff *skb; int inserted = 0; @@ -671,6 +653,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) DECLARE_WAITQUEUE(wait, current); struct rfcomm_dev *dev; struct rfcomm_dlc *dlc; + unsigned long flags; int err, id; id = tty->index; @@ -686,10 +669,14 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) return -ENODEV; BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), - dev->channel, atomic_read(&dev->opened)); + dev->channel, dev->port.count); - if (atomic_inc_return(&dev->opened) > 1) + spin_lock_irqsave(&dev->port.lock, flags); + if (++dev->port.count > 1) { + spin_unlock_irqrestore(&dev->port.lock, flags); return 0; + } + spin_unlock_irqrestore(&dev->port.lock, flags); dlc = dev->dlc; @@ -697,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) rfcomm_dlc_lock(dlc); tty->driver_data = dev; - dev->tty = tty; + dev->port.tty = tty; rfcomm_dlc_unlock(dlc); set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); @@ -744,13 +731,17 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + unsigned long flags; + if (!dev) return; BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, - atomic_read(&dev->opened)); + dev->port.count); - if (atomic_dec_and_test(&dev->opened)) { + spin_lock_irqsave(&dev->port.lock, flags); + if (!--dev->port.count) { + spin_unlock_irqrestore(&dev->port.lock, flags); if (dev->tty_dev->parent) device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); @@ -758,11 +749,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) rfcomm_dlc_close(dev->dlc, 0); clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); - cancel_work_sync(&dev->wakeup_task); rfcomm_dlc_lock(dev->dlc); tty->driver_data = NULL; - dev->tty = NULL; + 
dev->port.tty = NULL; rfcomm_dlc_unlock(dev->dlc); if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) { @@ -770,11 +760,12 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) list_del_init(&dev->list); spin_unlock(&rfcomm_dev_lock); - rfcomm_dev_put(dev); + tty_port_put(&dev->port); } - } + } else + spin_unlock_irqrestore(&dev->port.lock, flags); - rfcomm_dev_put(dev); + tty_port_put(&dev->port); } static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) @@ -1083,7 +1074,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty) if (rfcomm_dev_get(dev->id) == NULL) return; rfcomm_dev_del(dev); - rfcomm_dev_put(dev); + tty_port_put(&dev->port); } } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index f6ab1290796..cbdd313659a 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = { static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent); static void sco_chan_del(struct sock *sk, int err); -static int sco_conn_del(struct hci_conn *conn, int err); - static void sco_sock_close(struct sock *sk); static void sco_sock_kill(struct sock *sk); @@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk) } /* ---- SCO connections ---- */ -static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status) +static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; - if (conn || status) + if (conn) return conn; conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC); @@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk) else type = SCO_LINK; - hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW, + HCI_AT_NO_BONDING); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; } - conn = sco_conn_add(hcon, 0); + conn = sco_conn_add(hcon); if (!conn) { hci_conn_put(hcon); err = -ENOMEM; @@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) { struct sco_conn *conn = sco_pi(sk)->conn; struct sk_buff *skb; - int err, count; + int err; /* Check outgoing MTU */ if (len > conn->mtu) @@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) BT_DBG("sk %p len %d", sk, len); - count = min_t(unsigned int, conn->mtu, len); - skb = bt_skb_send_alloc(sk, count, - msg->msg_flags & MSG_DONTWAIT, &err); + skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; - if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { + if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); return -EFAULT; } hci_send_sco(conn->hcon, skb); - return count; + return len; } static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) @@ -277,17 +274,20 @@ drop: } /* -------- Socket interface ---------- */ -static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba) +static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) { - struct sock *sk; struct hlist_node *node; + struct sock *sk; + + sk_for_each(sk, node, &sco_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; - sk_for_each(sk, node, &sco_sk_list.head) if (!bacmp(&bt_sk(sk)->src, ba)) - goto found; - sk = NULL; -found: - return sk; + return sk; + } + + return NULL; } /* Find socket listening on source bdaddr. 
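[Annotation] The sco.c rewrite below changes the address-conflict rule: bind() now only records the source address, and the EADDRINUSE check moves into listen(), where the lookup skips anything not in the BT_LISTEN state. A sketch of that filtered lookup over a singly linked list; the BT_LISTEN value is assumed:

#include <stddef.h>
#include <string.h>

#define BT_LISTEN 4                     /* assumed for this sketch */

struct sco_sock_s {
        struct sco_sock_s *next;
        int state;
        unsigned char src[6];
};

static struct sco_sock_s *find_listener(struct sco_sock_s *head,
                                        const unsigned char *ba)
{
        for (struct sco_sock_s *sk = head; sk; sk = sk->next) {
                if (sk->state != BT_LISTEN)
                        continue;       /* bound-only sockets don't conflict */
                if (!memcmp(sk->src, ba, 6))
                        return sk;
        }
        return NULL;
}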
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; - bdaddr_t *src = &sa->sco_bdaddr; int err = 0; BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); @@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le goto done; } - write_lock(&sco_sk_list.lock); - - if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) { - err = -EADDRINUSE; - } else { - /* Save source address */ - bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); - sk->sk_state = BT_BOUND; + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; } - write_unlock(&sco_sk_list.lock); + bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); + + sk->sk_state = BT_BOUND; done: release_sock(sk); @@ -537,21 +533,38 @@ done: static int sco_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; + bdaddr_t *src = &bt_sk(sk)->src; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); - if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { + if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + write_lock(&sco_sk_list.lock); + + if (__sco_get_sock_listen_by_addr(src)) { + err = -EADDRINUSE; + goto unlock; + } + sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; + sk->sk_state = BT_LISTEN; +unlock: + write_unlock(&sco_sk_list.lock); + done: release_sock(sk); return err; @@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status) if (!status) { struct sco_conn *conn; - conn = sco_conn_add(hcon, status); + conn = sco_conn_add(hcon); if (conn) sco_conn_ready(conn); } else diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index deb119875fd..37df4e9b389 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -648,7 +648,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; - ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability); + ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); if (ret) return SMP_UNSPECIFIED; @@ -703,7 +703,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } -static u8 smp_ltk_encrypt(struct l2cap_conn *conn) +static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) { struct smp_ltk *key; struct hci_conn *hcon = conn->hcon; @@ -712,6 +712,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn) if (!key) return 0; + if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated) + return 0; + if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) return 1; @@ -732,7 +735,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); - if (smp_ltk_encrypt(conn)) + if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) @@ -771,7 +774,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) return 1; if (hcon->link_mode & HCI_LM_MASTER) - if (smp_ltk_encrypt(conn)) + if (smp_ltk_encrypt(conn, sec_level)) goto done; if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) @@ -956,7 +959,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) HCI_SMP_LTK_SLAVE, 1, authenticated, enc.ltk, smp->enc_key_size, ediv, ident.rand); - ident.ediv = cpu_to_le16(ediv); + 
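[Annotation] The smp_ltk_encrypt() change above threads the requested security level into the stored-key check: an unauthenticated (Just Works) LTK can no longer satisfy a request above medium security. The guard in isolation, with hypothetical local types and enum values:

#include <stdbool.h>

enum { BT_SECURITY_LOW, BT_SECURITY_MEDIUM, BT_SECURITY_HIGH };

struct smp_ltk_s {
        bool authenticated;     /* true only for MITM-protected pairing */
};

/* True when this stored key is strong enough for the request. */
static bool ltk_usable(const struct smp_ltk_s *key, int sec_level)
{
        if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
                return false;

        return true;
}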
ident.ediv = ediv; smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ba829de8442..929e48aed44 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -170,7 +170,7 @@ static int br_set_mac_address(struct net_device *dev, void *p) return -EADDRNOTAVAIL; spin_lock_bh(&br->lock); - if (compare_ether_addr(dev->dev_addr, addr->sa_data)) { + if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); br_fdb_change_mac_address(br, addr->sa_data); @@ -317,6 +317,9 @@ static const struct net_device_ops br_netdev_ops = { .ndo_add_slave = br_add_slave, .ndo_del_slave = br_del_slave, .ndo_fix_features = br_fix_features, + .ndo_fdb_add = br_fdb_add, + .ndo_fdb_del = br_fdb_delete, + .ndo_fdb_dump = br_fdb_dump, }; static void br_dev_free(struct net_device *dev) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5ba0c844d50..d21f3238351 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -107,8 +107,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) struct net_bridge_port *op; list_for_each_entry(op, &br->port_list, list) { if (op != p && - !compare_ether_addr(op->dev->dev_addr, - f->addr.addr)) { + ether_addr_equal(op->dev->dev_addr, + f->addr.addr)) { f->dst = op; goto insert; } @@ -214,8 +214,8 @@ void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *op; list_for_each_entry(op, &br->port_list, list) { if (op != p && - !compare_ether_addr(op->dev->dev_addr, - f->addr.addr)) { + ether_addr_equal(op->dev->dev_addr, + f->addr.addr)) { f->dst = op; goto skip_delete; } @@ -237,7 +237,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) { + if (ether_addr_equal(fdb->addr.addr, addr)) { if (unlikely(has_expired(br, fdb))) break; return fdb; @@ -331,7 +331,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry(fdb, h, head, hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) + if (ether_addr_equal(fdb->addr.addr, addr)) return fdb; } return NULL; @@ -344,7 +344,7 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry_rcu(fdb, h, head, hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) + if (ether_addr_equal(fdb->addr.addr, addr)) return fdb; } return NULL; @@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, ndm->ndm_ifindex = fdb->dst ? 
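[Annotation] The bridge hunks in this range convert every compare_ether_addr() call (memcmp-style, zero on match) to ether_addr_equal() (boolean, true on match), removing the easy-to-misread negations. The semantics, sketched over 6-byte addresses with _s-suffixed stand-ins:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

static int compare_ether_addr_s(const unsigned char *a,
                                const unsigned char *b)
{
        return memcmp(a, b, ETH_ALEN) != 0;     /* 0 means "equal" */
}

static bool ether_addr_equal_s(const unsigned char *a,
                               const unsigned char *b)
{
        return memcmp(a, b, ETH_ALEN) == 0;     /* true means "equal" */
}

So if (!compare_ether_addr(x, y)) and if (ether_addr_equal(x, y)) test the same condition; the diff flips the negation at every call site accordingly.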
fdb->dst->dev->ifindex : br->dev->ifindex; ndm->ndm_state = fdb_to_nud(fdb); - NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr); - + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr)) + goto nla_put_failure; ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ci.ndm_confirmed = 0; ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); ci.ndm_refcnt = 0; - NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -535,44 +535,38 @@ errout: } /* Dump information about entries, in response to GETNEIGH */ -int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +int br_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) { - struct net *net = sock_net(skb->sk); - struct net_device *dev; - int idx = 0; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - struct net_bridge *br = netdev_priv(dev); - int i; + struct net_bridge *br = netdev_priv(dev); + int i; - if (!(dev->priv_flags & IFF_EBRIDGE)) - continue; + if (!(dev->priv_flags & IFF_EBRIDGE)) + goto out; - for (i = 0; i < BR_HASH_SIZE; i++) { - struct hlist_node *h; - struct net_bridge_fdb_entry *f; - - hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { - if (idx < cb->args[0]) - goto skip; + for (i = 0; i < BR_HASH_SIZE; i++) { + struct hlist_node *h; + struct net_bridge_fdb_entry *f; - if (fdb_fill_info(skb, br, f, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, - NLM_F_MULTI) < 0) - break; + hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { + if (idx < cb->args[0]) + goto skip; + + if (fdb_fill_info(skb, br, f, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI) < 0) + break; skip: - ++idx; - } + ++idx; } } - rcu_read_unlock(); - cb->args[0] = idx; - - return skb->len; +out: + return idx; } /* Update (create or replace) forwarding database entry */ @@ -614,43 +608,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, } /* Add new permanent fdb entry with RTM_NEWNEIGH */ -int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +int br_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 nlh_flags) { - struct net *net = sock_net(skb->sk); - struct ndmsg *ndm; - struct nlattr *tb[NDA_MAX+1]; - struct net_device *dev; struct net_bridge_port *p; - const __u8 *addr; - int err; - - ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); - if (err < 0) - return err; - - ndm = nlmsg_data(nlh); - if (ndm->ndm_ifindex == 0) { - pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n"); - return -EINVAL; - } - - dev = __dev_get_by_index(net, ndm->ndm_ifindex); - if (dev == NULL) { - pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n"); - return -ENODEV; - } - - if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { - pr_info("bridge: RTM_NEWNEIGH with invalid address\n"); - return -EINVAL; - } - - addr = nla_data(tb[NDA_LLADDR]); - if (!is_valid_ether_addr(addr)) { - pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n"); - return -EINVAL; - } + int err = 0; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); @@ -670,14 +632,14 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) rcu_read_unlock(); } else { spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); + err = fdb_add_entry(p, 
addr, ndm->ndm_state, nlh_flags); spin_unlock_bh(&p->br->hash_lock); } return err; } -static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) +static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr) { struct net_bridge *br = p->br; struct hlist_head *head = &br->hash[br_mac_hash(addr)]; @@ -692,40 +654,12 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) } /* Remove neighbor entry with RTM_DELNEIGH */ -int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) { - struct net *net = sock_net(skb->sk); - struct ndmsg *ndm; struct net_bridge_port *p; - struct nlattr *llattr; - const __u8 *addr; - struct net_device *dev; int err; - ASSERT_RTNL(); - if (nlmsg_len(nlh) < sizeof(*ndm)) - return -EINVAL; - - ndm = nlmsg_data(nlh); - if (ndm->ndm_ifindex == 0) { - pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n"); - return -EINVAL; - } - - dev = __dev_get_by_index(net, ndm->ndm_ifindex); - if (dev == NULL) { - pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n"); - return -ENODEV; - } - - llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR); - if (llattr == NULL || nla_len(llattr) != ETH_ALEN) { - pr_info("bridge: RTM_DELNEIGH with invalid address\n"); - return -EINVAL; - } - - addr = nla_data(llattr); - p = br_port_get_rtnl(dev); if (p == NULL) { pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index a2098e3de50..e9466d41270 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -34,7 +34,7 @@ static inline int should_deliver(const struct net_bridge_port *p, p->state == BR_STATE_FORWARDING); } -static inline unsigned packet_length(const struct sk_buff *skb) +static inline unsigned int packet_length(const struct sk_buff *skb) { return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? 
VLAN_HLEN : 0); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 0a942fbccc9..e1144e1617b 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name) return -ENOMEM; dev_net_set(dev, net); + dev->rtnl_link_ops = &br_link_ops; res = register_netdev(dev); if (res) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 5a31731be4d..76f15fda021 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -216,7 +216,7 @@ forward: } /* fall through */ case BR_STATE_LEARNING: - if (!compare_ether_addr(p->br->dev->dev_addr, dest)) + if (ether_addr_equal(p->br->dev->dev_addr, dest)) skb->pkt_type = PACKET_HOST; NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 27ca25ed702..b66581208cb 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -36,6 +36,8 @@ #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) +static void br_multicast_start_querier(struct net_bridge *br); + #if IS_ENABLED(CONFIG_IPV6) static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { @@ -458,8 +460,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, hopopt[3] = 2; /* Length of RA Option */ hopopt[4] = 0; /* Type = 0x0000 (MLD) */ hopopt[5] = 0; - hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ - hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ + hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ + hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ skb_put(skb, sizeof(*ip6h) + 8); @@ -512,8 +514,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct hlist_node *p; - unsigned count = 0; - unsigned max; + unsigned int count = 0; + unsigned int max; int elasticity; int err; @@ -740,6 +742,20 @@ static void br_multicast_local_router_expired(unsigned long data) { } +static void br_multicast_querier_expired(unsigned long data) +{ + struct net_bridge *br = (void *)data; + + spin_lock(&br->multicast_lock); + if (!netif_running(br->dev) || br->multicast_disabled) + goto out; + + br_multicast_start_querier(br); + +out: + spin_unlock(&br->multicast_lock); +} + static void __br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *ip) @@ -766,6 +782,7 @@ static void br_multicast_send_query(struct net_bridge *br, struct br_ip br_group; if (!netif_running(br->dev) || br->multicast_disabled || + !br->multicast_querier || timer_pending(&br->multicast_querier_timer)) return; @@ -1281,8 +1298,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, struct sk_buff *skb2 = skb; const struct iphdr *iph; struct igmphdr *ih; - unsigned len; - unsigned offset; + unsigned int len; + unsigned int offset; int err; /* We treat OOM as packet loss for now. 
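[Annotation] The hop-by-hop option block built for MLD queries above is worth seeing whole; the rename from IPV6_TLV_PAD0 to IPV6_TLV_PAD1 adopts the RFC 2460 option name (Pad1: a single zero octet with no length byte) and changes nothing on the wire. A sketch of the full 8-octet block; the Router Alert option type of 5 is assumed here:

#include <stdint.h>

static void fill_mld_hopopt(uint8_t hopopt[8], uint8_t next_hdr)
{
        hopopt[0] = next_hdr;   /* ICMPv6 follows this header */
        hopopt[1] = 0;          /* length in 8-octet units, minus 1 */
        hopopt[2] = 5;          /* IPV6_TLV_ROUTERALERT (assumed) */
        hopopt[3] = 2;          /* RA option data length */
        hopopt[4] = 0;          /* value 0x0000 = MLD */
        hopopt[5] = 0;
        hopopt[6] = 0;          /* Pad1 */
        hopopt[7] = 0;          /* Pad1 */
}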
*/ @@ -1382,7 +1399,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, u8 icmp6_type; u8 nexthdr; __be16 frag_off; - unsigned len; + unsigned int len; int offset; int err; @@ -1548,6 +1565,7 @@ void br_multicast_init(struct net_bridge *br) br->hash_max = 512; br->multicast_router = 1; + br->multicast_querier = 0; br->multicast_last_member_count = 2; br->multicast_startup_query_count = 2; @@ -1562,7 +1580,7 @@ void br_multicast_init(struct net_bridge *br) setup_timer(&br->multicast_router_timer, br_multicast_local_router_expired, 0); setup_timer(&br->multicast_querier_timer, - br_multicast_local_router_expired, 0); + br_multicast_querier_expired, (unsigned long)br); setup_timer(&br->multicast_query_timer, br_multicast_query_expired, (unsigned long)br); } @@ -1689,9 +1707,23 @@ unlock: return err; } -int br_multicast_toggle(struct net_bridge *br, unsigned long val) +static void br_multicast_start_querier(struct net_bridge *br) { struct net_bridge_port *port; + + br_multicast_open(br); + + list_for_each_entry(port, &br->port_list, list) { + if (port->state == BR_STATE_DISABLED || + port->state == BR_STATE_BLOCKING) + continue; + + __br_multicast_enable_port(port); + } +} + +int br_multicast_toggle(struct net_bridge *br, unsigned long val) +{ int err = 0; struct net_bridge_mdb_htable *mdb; @@ -1721,14 +1753,7 @@ rollback: goto rollback; } - br_multicast_open(br); - list_for_each_entry(port, &br->port_list, list) { - if (port->state == BR_STATE_DISABLED || - port->state == BR_STATE_BLOCKING) - continue; - - __br_multicast_enable_port(port); - } + br_multicast_start_querier(br); unlock: spin_unlock_bh(&br->multicast_lock); @@ -1736,6 +1761,24 @@ unlock: return err; } +int br_multicast_set_querier(struct net_bridge *br, unsigned long val) +{ + val = !!val; + + spin_lock_bh(&br->multicast_lock); + if (br->multicast_querier == val) + goto unlock; + + br->multicast_querier = val; + if (val) + br_multicast_start_querier(br); + +unlock: + spin_unlock_bh(&br->multicast_lock); + + return 0; +} + int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) { int err = -ENOENT; diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index d7f49b63ab0..e41456bd3cc 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -54,12 +54,14 @@ static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; +static int brnf_pass_vlan_indev __read_mostly = 0; #else #define brnf_call_iptables 1 #define brnf_call_ip6tables 1 #define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 +#define brnf_pass_vlan_indev 0 #endif #define IS_IP(skb) \ @@ -503,6 +505,19 @@ bridged_dnat: return 0; } +static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev) +{ + struct net_device *vlan, *br; + + br = bridge_parent(dev); + if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) + return br; + + vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK); + + return vlan ? 
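[Annotation] br_multicast_set_querier() in this range is deliberately idempotent and only kicks the querier on the off-to-on edge; factoring out br_multicast_start_querier() lets the snooping toggle reuse the same port re-enable loop. The control-flow shape, with locking elided (the kernel holds br->multicast_lock around both fields) and local stand-in names:

#include <stdbool.h>

struct bridge_s {
        bool multicast_querier;
};

static void start_querier(struct bridge_s *br)
{
        /* re-arm query timers and re-enable eligible ports */
}

static int set_querier(struct bridge_s *br, bool val)
{
        if (br->multicast_querier == val)
                return 0;               /* no change, nothing to do */

        br->multicast_querier = val;
        if (val)
                start_querier(br);      /* only on the off->on edge */

        return 0;
}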
vlan : br; +} + /* Some common code for IPv4/IPv6 */ static struct net_device *setup_pre_routing(struct sk_buff *skb) { @@ -515,7 +530,7 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb) nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; nf_bridge->physindev = skb->dev; - skb->dev = bridge_parent(skb->dev); + skb->dev = brnf_get_logical_dev(skb, skb->dev); if (skb->protocol == htons(ETH_P_8021Q)) nf_bridge->mask |= BRNF_8021Q; else if (skb->protocol == htons(ETH_P_PPP_SES)) @@ -543,7 +558,7 @@ static int check_hbh_len(struct sk_buff *skb) int optlen = nh[off + 1] + 2; switch (nh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; @@ -774,7 +789,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, else skb->protocol = htons(ETH_P_IPV6); - NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, + NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent, br_nf_forward_finish); return NF_STOLEN; @@ -1002,12 +1017,13 @@ static ctl_table brnf_table[] = { .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, - { } -}; - -static struct ctl_path brnf_path[] = { - { .procname = "net", }, - { .procname = "bridge", }, + { + .procname = "bridge-nf-pass-vlan-input-dev", + .data = &brnf_pass_vlan_indev, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = brnf_sysctl_call_tables, + }, { } }; #endif @@ -1026,7 +1042,7 @@ int __init br_netfilter_init(void) return ret; } #ifdef CONFIG_SYSCTL - brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); + brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); @@ -1043,7 +1059,7 @@ void br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL - unregister_sysctl_table(brnf_sysctl_header); + unregister_net_sysctl_table(brnf_sysctl_header); #endif dst_entries_destroy(&fake_dst_ops); } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index a1daf8227ed..fe41260fbf3 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex); - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate); - - if (dev->addr_len) - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - - if (event == RTM_NEWLINK) - NLA_PUT_U8(skb, IFLA_PROTINFO, port->state); - + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + nla_put_u8(skb, IFLA_OPERSTATE, operstate) || + (dev->addr_len && + nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink)) || + (event == RTM_NEWLINK && + nla_put_u8(skb, IFLA_PROTINFO, port->state))) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -91,7 +88,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) int err = -ENOBUFS; br_debug(port->br, "port %u(%s) event %d\n", - (unsigned)port->port_no, port->dev->name, event); + (unsigned int)port->port_no, port->dev->name, event); skb = nlmsg_new(br_nlmsg_size(), 
GFP_ATOMIC); if (skb == NULL) @@ -211,7 +208,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } -static struct rtnl_link_ops br_link_ops __read_mostly = { +struct rtnl_link_ops br_link_ops __read_mostly = { .kind = "bridge", .priv_size = sizeof(struct net_bridge), .setup = br_dev_setup, @@ -235,18 +232,6 @@ int __init br_netlink_init(void) br_rtm_setlink, NULL, NULL); if (err) goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, - br_fdb_add, NULL, NULL); - if (err) - goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, - br_fdb_delete, NULL, NULL); - if (err) - goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, - NULL, br_fdb_dump, NULL); - if (err) - goto err3; return 0; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e1d88225787..a768b2408ed 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -224,6 +224,7 @@ struct net_bridge unsigned char multicast_router; u8 multicast_disabled:1; + u8 multicast_querier:1; u32 hash_elasticity; u32 hash_max; @@ -359,9 +360,18 @@ extern int br_fdb_insert(struct net_bridge *br, extern void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr); -extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); + +extern int br_fdb_delete(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr); +extern int br_fdb_add(struct ndmsg *nlh, + struct net_device *dev, + unsigned char *addr, + u16 nlh_flags); +extern int br_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx); /* br_forward.c */ extern void br_deliver(const struct net_bridge_port *to, @@ -417,6 +427,7 @@ extern int br_multicast_set_router(struct net_bridge *br, unsigned long val); extern int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val); extern int br_multicast_toggle(struct net_bridge *br, unsigned long val); +extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val); extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val); static inline bool br_multicast_is_router(struct net_bridge *br) @@ -538,6 +549,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr) #endif /* br_netlink.c */ +extern struct rtnl_link_ops br_link_ops; extern int br_netlink_init(void); extern void br_netlink_fini(void); extern void br_ifinfo_notify(int event, struct net_bridge_port *port); diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h index 05ed9bc7e42..0c0fe36e7aa 100644 --- a/net/bridge/br_private_stp.h +++ b/net/bridge/br_private_stp.h @@ -29,10 +29,9 @@ #define BR_MIN_PATH_COST 1 #define BR_MAX_PATH_COST 65535 -struct br_config_bpdu -{ - unsigned topology_change:1; - unsigned topology_change_ack:1; +struct br_config_bpdu { + unsigned int topology_change:1; + unsigned int topology_change_ack:1; bridge_id root; int root_path_cost; bridge_id bridge_id; diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 8c836d96ba7..af9a12099ba 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -32,7 +32,7 @@ static const char *const br_port_state_names[] = { void br_log_state(const struct net_bridge_port *p) { br_info(p->br, "port %u(%s) entered %s state\n", - (unsigned) p->port_no, p->dev->name, + (unsigned int) p->port_no, p->dev->name, 
br_port_state_names[p->state]); } @@ -478,7 +478,7 @@ void br_received_tcn_bpdu(struct net_bridge_port *p) { if (br_is_designated_port(p)) { br_info(p->br, "port %u(%s) received tcn bpdu\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); br_topology_change_detection(p->br); br_topology_change_acknowledge(p); diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index e16aade51ae..fd30a6022de 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -167,7 +167,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, if (p->state == BR_STATE_DISABLED) goto out; - if (compare_ether_addr(dest, br->group_addr) != 0) + if (!ether_addr_equal(dest, br->group_addr)) goto out; buf = skb_pull(skb, 3); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index f494496373d..9d5a414a394 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -178,7 +178,7 @@ void br_stp_set_enabled(struct net_bridge *br, unsigned long val) /* called under bridge lock */ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) { - /* should be aligned on 2 bytes for compare_ether_addr() */ + /* should be aligned on 2 bytes for ether_addr_equal() */ unsigned short oldaddr_aligned[ETH_ALEN >> 1]; unsigned char *oldaddr = (unsigned char *)oldaddr_aligned; struct net_bridge_port *p; @@ -191,12 +191,11 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) memcpy(br->dev->dev_addr, addr, ETH_ALEN); list_for_each_entry(p, &br->port_list, list) { - if (!compare_ether_addr(p->designated_bridge.addr, oldaddr)) + if (ether_addr_equal(p->designated_bridge.addr, oldaddr)) memcpy(p->designated_bridge.addr, addr, ETH_ALEN); - if (!compare_ether_addr(p->designated_root.addr, oldaddr)) + if (ether_addr_equal(p->designated_root.addr, oldaddr)) memcpy(p->designated_root.addr, addr, ETH_ALEN); - } br_configuration_update(br); @@ -205,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) br_become_root_bridge(br); } -/* should be aligned on 2 bytes for compare_ether_addr() */ +/* should be aligned on 2 bytes for ether_addr_equal() */ static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; /* called under bridge lock */ @@ -227,7 +226,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br) } - if (compare_ether_addr(br->bridge_id.addr, addr) == 0) + if (ether_addr_equal(br->bridge_id.addr, addr)) return false; /* no change */ br_stp_change_bridge_id(br, addr); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 58de2a0f997..a6747e67342 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -56,7 +56,7 @@ static void br_message_age_timer_expired(unsigned long arg) return; br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", - (unsigned) p->port_no, p->dev->name, + (unsigned int) p->port_no, p->dev->name, id->prio[0], id->prio[1], &id->addr); /* @@ -84,7 +84,7 @@ static void br_forward_delay_timer_expired(unsigned long arg) struct net_bridge *br = p->br; br_debug(br, "port %u(%s) forward delay timer\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); spin_lock(&br->lock); if (p->state == BR_STATE_LISTENING) { p->state = BR_STATE_LEARNING; @@ -131,7 +131,7 @@ static void br_hold_timer_expired(unsigned long arg) struct net_bridge_port *p = (struct net_bridge_port *) arg; br_debug(p->br, "port %u(%s) hold timer expired\n", - (unsigned) p->port_no, p->dev->name); + (unsigned 
int) p->port_no, p->dev->name); spin_lock(&p->br->lock); if (p->config_pending) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index c236c0e4398..c5c059333ea 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -297,7 +297,7 @@ static ssize_t store_group_addr(struct device *d, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); - unsigned new_addr[6]; + unsigned int new_addr[6]; int i; if (!capable(CAP_NET_ADMIN)) @@ -379,6 +379,23 @@ static ssize_t store_multicast_snooping(struct device *d, static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, show_multicast_snooping, store_multicast_snooping); +static ssize_t show_multicast_querier(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%d\n", br->multicast_querier); +} + +static ssize_t store_multicast_querier(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_multicast_set_querier); +} +static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR, + show_multicast_querier, store_multicast_querier); + static ssize_t show_hash_elasticity(struct device *d, struct device_attribute *attr, char *buf) { @@ -702,6 +719,7 @@ static struct attribute *bridge_attrs[] = { #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &dev_attr_multicast_router.attr, &dev_attr_multicast_snooping.attr, + &dev_attr_multicast_querier.attr, &dev_attr_hash_elasticity.attr, &dev_attr_hash_max.attr, &dev_attr_multicast_last_member_count.attr, diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 5b33a2e634a..071d87214dd 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c @@ -164,8 +164,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par) !(info->bitmask & EBT_STP_MASK)) return -EINVAL; /* Make sure the match only receives stp frames */ - if (compare_ether_addr(e->destmac, bridge_ula) || - compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) + if (!ether_addr_equal(e->destmac, bridge_ula) || + !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) return -EINVAL; return 0; diff --git a/net/caif/Kconfig b/net/caif/Kconfig index 936361e5a2b..d3694953b1d 100644 --- a/net/caif/Kconfig +++ b/net/caif/Kconfig @@ -25,7 +25,7 @@ config CAIF_DEBUG bool "Enable Debug" depends on CAIF default n - --- help --- + ---help--- Enable the inclusion of debug code in the CAIF stack. Be aware that doing this will impact performance. If unsure say N. diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index aa6f716524f..8c83c175b03 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -4,8 +4,7 @@ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 * - * Borrowed heavily from file: pn_dev.c. Thanks to - * Remi Denis-Courmont <remi.denis-courmont@nokia.com> + * Borrowed heavily from file: pn_dev.c. 
Thanks to Remi Denis-Courmont * and Sakari Ailus <sakari.ailus@nokia.com> */ @@ -562,9 +561,9 @@ static int __init caif_device_init(void) static void __exit caif_device_exit(void) { - unregister_pernet_subsys(&caif_net_ops); unregister_netdevice_notifier(&caif_device_notifier); dev_remove_pack(&caif_packet_type); + unregister_pernet_subsys(&caif_net_ops); } module_init(caif_device_init); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 5016fa57b62..78f1cdad5b3 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -19,7 +19,7 @@ #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/caif/caif_socket.h> -#include <linux/atomic.h> +#include <linux/pkt_sched.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/caif/caif_layer.h> @@ -130,11 +130,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { - if (net_ratelimit()) - pr_debug("sending flow OFF (queue len = %d %d)\n", - atomic_read(&cf_sk->sk.sk_rmem_alloc), - sk_rcvbuf_lowwater(cf_sk)); + (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { + net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n", + atomic_read(&cf_sk->sk.sk_rmem_alloc), + sk_rcvbuf_lowwater(cf_sk)); set_rx_flow_off(cf_sk); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } @@ -144,8 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return err; if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { set_rx_flow_off(cf_sk); - if (net_ratelimit()) - pr_debug("sending flow OFF due to rmem_schedule\n"); + net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } skb->dev = NULL; @@ -222,6 +220,7 @@ static void caif_ctrl_cb(struct cflayer *layr, cfsk_hold, cfsk_put); cf_sk->sk.sk_state = CAIF_CONNECTED; set_tx_flow_on(cf_sk); + cf_sk->sk.sk_shutdown = 0; cf_sk->sk.sk_state_change(&cf_sk->sk); break; @@ -505,6 +504,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); memset(skb->cb, 0, sizeof(struct caif_payload_info)); + cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); if (cf_sk->layer.dn == NULL) { kfree_skb(skb); @@ -1062,6 +1062,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; + /* Initialize default priority for well-known cases */ + switch (protocol) { + case CAIFPROTO_AT: + sk->sk_priority = TC_PRIO_CONTROL; + break; + case CAIFPROTO_RFM: + sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; + break; + default: + sk->sk_priority = TC_PRIO_BESTEFFORT; + } + /* * Lock in order to try to stop someone from opening the socket * too early. 
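The caif_socket.c hunk above stops hard-coding CAIF_PRIO_NORMAL and instead seeds sk->sk_priority from the socket's protocol, so AT control traffic is scheduled ahead of bulk RFM data; cfpkt_set_prio() (added in cfpkt_skbuff.c below) then stamps that priority onto each outgoing packet. A minimal sketch of the same mapping, factored into a helper (the helper name is hypothetical and not part of the patch; only the switch body is taken from it):

	/* Hypothetical helper mirroring the per-protocol defaults that
	 * caif_create() now applies; TC_PRIO_* come from <linux/pkt_sched.h>,
	 * which the patch adds to caif_socket.c's includes. */
	static u32 caif_default_priority(int protocol)
	{
		switch (protocol) {
		case CAIFPROTO_AT:		/* modem control channel */
			return TC_PRIO_CONTROL;
		case CAIFPROTO_RFM:		/* remote file manager */
			return TC_PRIO_INTERACTIVE_BULK;
		default:
			return TC_PRIO_BESTEFFORT;
		}
	}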
@@ -1081,7 +1093,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, set_rx_flow_on(cf_sk); /* Set default options on configuration */ - cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 5cf52225692..047cd0eec02 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -9,6 +9,7 @@ #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/pkt_sched.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfctrl.h> @@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) cfctrl->serv.dev_info.id = physlinkid; cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); cfpkt_addbdy(pkt, physlinkid); + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); dn->transmit(dn, pkt); } @@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer, * might arrive with the newly allocated channel ID. */ cfpkt_info(pkt)->dev_info->id = param->phyid; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); ret = dn->transmit(dn, pkt); if (ret < 0) { @@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); cfpkt_addbdy(pkt, channelid); init_info(cfpkt_info(pkt), cfctrl); + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); ret = dn->transmit(dn, pkt); #ifndef CAIF_NO_LOOP diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index e335ba859b9..863dedd91bb 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) memcpy(skb2->data, split, len2nd); skb2->tail += len2nd; skb2->len += len2nd; + skb2->priority = skb->priority; return skb_to_pkt(skb2); } @@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info); + +void cfpkt_set_prio(struct cfpkt *pkt, int prio) +{ + pkt_to_skb(pkt)->priority = prio; +} +EXPORT_SYMBOL(cfpkt_set_prio); diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index 4aa33d4496b..dd485f6128e 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/pkt_sched.h> #include <net/caif/caif_layer.h> #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> @@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); return layr->dn->transmit(layr->dn, pkt); } case CAIF_MODEMCMD_FLOW_OFF_REQ: @@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); return layr->dn->transmit(layr->dn, pkt); } default: diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index d09340e1523..69771c04ba8 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -424,14 +424,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev) struct chnl_net *priv; u8 loop; priv = netdev_priv(dev); - NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, - priv->conn_req.sockaddr.u.dgm.connection_id); - NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, - priv->conn_req.sockaddr.u.dgm.connection_id); + if 
(nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID, + priv->conn_req.sockaddr.u.dgm.connection_id) || + nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID, + priv->conn_req.sockaddr.u.dgm.connection_id)) + goto nla_put_failure; loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; - NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); - - + if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop)) + goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; diff --git a/net/can/gw.c b/net/can/gw.c index 3d79b127881..b41acf25668 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -66,7 +66,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS("can-gw"); -HLIST_HEAD(cgw_list); +static HLIST_HEAD(cgw_list); static struct notifier_block notifier; static struct kmem_cache *cgw_cache __read_mostly; diff --git a/net/can/raw.c b/net/can/raw.c index cde1b4a20f7..46cca3a91d1 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, if (err < 0) goto free_skb; - /* to be able to check the received tx sock reference in raw_rcv() */ - skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF; - skb->dev = dev; skb->sk = sk; diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c index 214c2bb43d6..925ca583c09 100644 --- a/net/ceph/auth_none.c +++ b/net/ceph/auth_none.c @@ -59,9 +59,7 @@ static int handle_reply(struct ceph_auth_client *ac, int result, */ static int ceph_auth_none_create_authorizer( struct ceph_auth_client *ac, int peer_type, - struct ceph_authorizer **a, - void **buf, size_t *len, - void **reply_buf, size_t *reply_len) + struct ceph_auth_handshake *auth) { struct ceph_auth_none_info *ai = ac->private; struct ceph_none_authorizer *au = &ai->au; @@ -82,11 +80,12 @@ static int ceph_auth_none_create_authorizer( dout("built authorizer len %d\n", au->buf_len); } - *a = (struct ceph_authorizer *)au; - *buf = au->buf; - *len = au->buf_len; - *reply_buf = au->reply_buf; - *reply_len = sizeof(au->reply_buf); + auth->authorizer = (struct ceph_authorizer *) au; + auth->authorizer_buf = au->buf; + auth->authorizer_buf_len = au->buf_len; + auth->authorizer_reply_buf = au->reply_buf; + auth->authorizer_reply_buf_len = sizeof (au->reply_buf); + return 0; bad2: diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 1587dc6010c..a16bf14eb02 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -526,9 +526,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, - struct ceph_authorizer **a, - void **buf, size_t *len, - void **reply_buf, size_t *reply_len) + struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; @@ -548,11 +546,12 @@ static int ceph_x_create_authorizer( return ret; } - *a = (struct ceph_authorizer *)au; - *buf = au->buf->vec.iov_base; - *len = au->buf->vec.iov_len; - *reply_buf = au->reply_buf; - *reply_len = sizeof(au->reply_buf); + auth->authorizer = (struct ceph_authorizer *) au; + auth->authorizer_buf = au->buf->vec.iov_base; + auth->authorizer_buf_len = au->buf->vec.iov_len; + auth->authorizer_reply_buf = au->reply_buf; + auth->authorizer_reply_buf_len = sizeof (au->reply_buf); + return 0; } diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h index e02da7a5c5a..f459e93b774 100644 --- a/net/ceph/auth_x.h +++ b/net/ceph/auth_x.h @@ -13,7 +13,7 @@ */ struct ceph_x_ticket_handler { struct rb_node node; - unsigned service; + unsigned int service; 
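Both ceph auth backends (auth_none.c and auth_x.c above) now return their authorizer through a single struct ceph_auth_handshake instead of five separate out-parameters. A caller-side sketch under that assumption (the function below is illustrative, not part of the patch; the field names are taken from the hunks above):

	/* Illustrative caller: one handshake struct replaces the old
	 * a/buf/len/reply_buf/reply_len out-parameter list. */
	static int fill_handshake(struct ceph_auth_client *ac, int peer_type,
				  struct ceph_auth_handshake *auth)
	{
		int ret = ac->ops->create_authorizer(ac, peer_type, auth);

		if (ret < 0)
			return ret;
		/* auth->authorizer_buf (authorizer_buf_len bytes) is the blob
		 * to send; the peer's reply lands in auth->authorizer_reply_buf. */
		return 0;
	}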
struct ceph_crypto_key session_key; struct ceph_timespec validity; @@ -27,7 +27,7 @@ struct ceph_x_ticket_handler { struct ceph_x_authorizer { struct ceph_buffer *buf; - unsigned service; + unsigned int service; u64 nonce; char reply_buf[128]; /* big enough for encrypted blob */ }; @@ -38,7 +38,7 @@ struct ceph_x_info { bool starting; u64 server_challenge; - unsigned have_keys; + unsigned int have_keys; struct rb_root ticket_handlers; struct ceph_x_authorizer auth_authorizer; diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index cc913193d99..ba4323bce0e 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -441,8 +441,8 @@ EXPORT_SYMBOL(ceph_client_id); * create a fresh client instance */ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, - unsigned supported_features, - unsigned required_features) + unsigned int supported_features, + unsigned int required_features) { struct ceph_client *client; struct ceph_entity_addr *myaddr = NULL; @@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client) /* unmount */ ceph_osdc_stop(&client->osdc); - /* - * make sure osd connections close out before destroying the - * auth module, which is needed to free those connections' - * ceph_authorizers. - */ - ceph_msgr_flush(); - ceph_monc_stop(&client->monc); ceph_debugfs_client_cleanup(client); diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 0a1b53bce76..67bb1f11e61 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -20,7 +20,7 @@ c = c - a; c = c - b; c = c ^ (b >> 15); \ } while (0) -unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) +unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) { const unsigned char *k = (const unsigned char *)str; __u32 a, b, c; /* the internal state */ @@ -81,7 +81,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) /* * linux dcache hash */ -unsigned ceph_str_hash_linux(const char *str, unsigned length) +unsigned int ceph_str_hash_linux(const char *str, unsigned int length) { unsigned long hash = 0; unsigned char c; @@ -94,7 +94,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length) } -unsigned ceph_str_hash(int type, const char *s, unsigned len) +unsigned int ceph_str_hash(int type, const char *s, unsigned int len) { switch (type) { case CEPH_STR_HASH_LINUX: diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c index d6ebb13a18a..089613234f0 100644 --- a/net/ceph/crush/crush.c +++ b/net/ceph/crush/crush.c @@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int alg) * @b: bucket pointer * @p: item index in bucket */ -int crush_get_bucket_item_weight(struct crush_bucket *b, int p) +int crush_get_bucket_item_weight(const struct crush_bucket *b, int p) { - if (p >= b->size) + if ((__u32)p >= b->size) return 0; switch (b->alg) { @@ -37,38 +37,13 @@ int crush_get_bucket_item_weight(struct crush_bucket *b, int p) case CRUSH_BUCKET_LIST: return ((struct crush_bucket_list *)b)->item_weights[p]; case CRUSH_BUCKET_TREE: - if (p & 1) - return ((struct crush_bucket_tree *)b)->node_weights[p]; - return 0; + return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)]; case CRUSH_BUCKET_STRAW: return ((struct crush_bucket_straw *)b)->item_weights[p]; } return 0; } -/** - * crush_calc_parents - Calculate parent vectors for the given crush map. 
- * @map: crush_map pointer - */ -void crush_calc_parents(struct crush_map *map) -{ - int i, b, c; - - for (b = 0; b < map->max_buckets; b++) { - if (map->buckets[b] == NULL) - continue; - for (i = 0; i < map->buckets[b]->size; i++) { - c = map->buckets[b]->items[i]; - BUG_ON(c >= map->max_devices || - c < -map->max_buckets); - if (c >= 0) - map->device_parents[c] = map->buckets[b]->id; - else - map->bucket_parents[-1-c] = map->buckets[b]->id; - } - } -} - void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b) { kfree(b->h.perm); @@ -87,6 +62,8 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b) void crush_destroy_bucket_tree(struct crush_bucket_tree *b) { + kfree(b->h.perm); + kfree(b->h.items); kfree(b->node_weights); kfree(b); } @@ -124,10 +101,9 @@ void crush_destroy_bucket(struct crush_bucket *b) */ void crush_destroy(struct crush_map *map) { - int b; - /* buckets */ if (map->buckets) { + __s32 b; for (b = 0; b < map->max_buckets; b++) { if (map->buckets[b] == NULL) continue; @@ -138,13 +114,12 @@ void crush_destroy(struct crush_map *map) /* rules */ if (map->rules) { + __u32 b; for (b = 0; b < map->max_rules; b++) kfree(map->rules[b]); kfree(map->rules); } - kfree(map->bucket_parents); - kfree(map->device_parents); kfree(map); } diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index b79747c4b64..d7edc24333b 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -20,6 +20,7 @@ #include <linux/crush/crush.h> #include <linux/crush/hash.h> +#include <linux/crush/mapper.h> /* * Implement the core CRUSH mapping algorithm. @@ -32,9 +33,9 @@ * @type: storage ruleset type (user defined) * @size: output set size */ -int crush_find_rule(struct crush_map *map, int ruleset, int type, int size) +int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size) { - int i; + __u32 i; for (i = 0; i < map->max_rules; i++) { if (map->rules[i] && @@ -68,11 +69,11 @@ int crush_find_rule(struct crush_map *map, int ruleset, int type, int size) static int bucket_perm_choose(struct crush_bucket *bucket, int x, int r) { - unsigned pr = r % bucket->size; - unsigned i, s; + unsigned int pr = r % bucket->size; + unsigned int i, s; /* start a new permutation if @x has changed */ - if (bucket->perm_x != x || bucket->perm_n == 0) { + if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) { dprintk("bucket %d new x=%d\n", bucket->id, x); bucket->perm_x = x; @@ -100,13 +101,13 @@ static int bucket_perm_choose(struct crush_bucket *bucket, for (i = 0; i < bucket->perm_n; i++) dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); while (bucket->perm_n <= pr) { - unsigned p = bucket->perm_n; + unsigned int p = bucket->perm_n; /* no point in swapping the final entry */ if (p < bucket->size - 1) { i = crush_hash32_3(bucket->hash, x, bucket->id, p) % (bucket->size - p); if (i) { - unsigned t = bucket->perm[p + i]; + unsigned int t = bucket->perm[p + i]; bucket->perm[p + i] = bucket->perm[p]; bucket->perm[p] = t; } @@ -152,8 +153,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket, return bucket->h.items[i]; } - BUG_ON(1); - return 0; + dprintk("bad list sums for bucket %d\n", bucket->h.id); + return bucket->h.items[0]; } @@ -219,7 +220,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket, static int bucket_straw_choose(struct crush_bucket_straw *bucket, int x, int r) { - int i; + __u32 i; int high = 0; __u64 high_draw = 0; __u64 draw; @@ -239,6 +240,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket, 
static int crush_bucket_choose(struct crush_bucket *in, int x, int r) { dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r); + BUG_ON(in->size == 0); switch (in->alg) { case CRUSH_BUCKET_UNIFORM: return bucket_uniform_choose((struct crush_bucket_uniform *)in, @@ -253,7 +255,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) return bucket_straw_choose((struct crush_bucket_straw *)in, x, r); default: - BUG_ON(1); + dprintk("unknown bucket %d alg %d\n", in->id, in->alg); return in->items[0]; } } @@ -262,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) * true if device is marked "out" (failed, fully offloaded) * of the cluster */ -static int is_out(struct crush_map *map, __u32 *weight, int item, int x) +static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x) { if (weight[item] >= 0x10000) return 0; @@ -287,16 +289,16 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x) * @recurse_to_leaf: true if we want one device under each item of given type * @out2: second output vector for leaf items (if @recurse_to_leaf) */ -static int crush_choose(struct crush_map *map, +static int crush_choose(const struct crush_map *map, struct crush_bucket *bucket, - __u32 *weight, + const __u32 *weight, int x, int numrep, int type, int *out, int outpos, int firstn, int recurse_to_leaf, int *out2) { int rep; - int ftotal, flocal; + unsigned int ftotal, flocal; int retry_descent, retry_bucket, skip_rep; struct crush_bucket *in = bucket; int r; @@ -304,7 +306,7 @@ static int crush_choose(struct crush_map *map, int item = 0; int itemtype; int collide, reject; - const int orig_tries = 5; /* attempts before we fall back to search */ + const unsigned int orig_tries = 5; /* attempts before we fall back to search */ dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "", bucket->id, x, outpos, numrep); @@ -325,7 +327,7 @@ static int crush_choose(struct crush_map *map, r = rep; if (in->alg == CRUSH_BUCKET_UNIFORM) { /* be careful */ - if (firstn || numrep >= in->size) + if (firstn || (__u32)numrep >= in->size) /* r' = r + f_total */ r += ftotal; else if (in->size % numrep == 0) @@ -354,7 +356,11 @@ static int crush_choose(struct crush_map *map, item = bucket_perm_choose(in, x, r); else item = crush_bucket_choose(in, x, r); - BUG_ON(item >= map->max_devices); + if (item >= map->max_devices) { + dprintk(" bad item %d\n", item); + skip_rep = 1; + break; + } /* desired type? */ if (item < 0) @@ -365,8 +371,12 @@ static int crush_choose(struct crush_map *map, /* keep going? 
*/ if (itemtype != type) { - BUG_ON(item >= 0 || - (-1-item) >= map->max_buckets); + if (item >= 0 || + (-1-item) >= map->max_buckets) { + dprintk(" bad item type %d\n", type); + skip_rep = 1; + break; + } in = map->buckets[-1-item]; retry_bucket = 1; continue; @@ -415,7 +425,7 @@ reject: if (collide && flocal < 3) /* retry locally a few times */ retry_bucket = 1; - else if (flocal < in->size + orig_tries) + else if (flocal <= in->size + orig_tries) /* exhaustive bucket search */ retry_bucket = 1; else if (ftotal < 20) @@ -425,7 +435,7 @@ reject: /* else give up */ skip_rep = 1; dprintk(" reject %d collide %d " - "ftotal %d flocal %d\n", + "ftotal %u flocal %u\n", reject, collide, ftotal, flocal); } @@ -454,15 +464,12 @@ reject: * @x: hash input * @result: pointer to result vector * @result_max: maximum result size - * @force: force initial replica choice; -1 for none */ -int crush_do_rule(struct crush_map *map, +int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, - int force, __u32 *weight) + const __u32 *weight) { int result_len; - int force_context[CRUSH_MAX_DEPTH]; - int force_pos = -1; int a[CRUSH_MAX_SET]; int b[CRUSH_MAX_SET]; int c[CRUSH_MAX_SET]; @@ -473,66 +480,44 @@ int crush_do_rule(struct crush_map *map, int osize; int *tmp; struct crush_rule *rule; - int step; + __u32 step; int i, j; int numrep; int firstn; - BUG_ON(ruleno >= map->max_rules); + if ((__u32)ruleno >= map->max_rules) { + dprintk(" bad ruleno %d\n", ruleno); + return 0; + } rule = map->rules[ruleno]; result_len = 0; w = a; o = b; - /* - * determine hierarchical context of force, if any. note - * that this may or may not correspond to the specific types - * referenced by the crush rule. - */ - if (force >= 0 && - force < map->max_devices && - map->device_parents[force] != 0 && - !is_out(map, weight, force, x)) { - while (1) { - force_context[++force_pos] = force; - if (force >= 0) - force = map->device_parents[force]; - else - force = map->bucket_parents[-1-force]; - if (force == 0) - break; - } - } - for (step = 0; step < rule->len; step++) { + struct crush_rule_step *curstep = &rule->steps[step]; + firstn = 0; - switch (rule->steps[step].op) { + switch (curstep->op) { case CRUSH_RULE_TAKE: - w[0] = rule->steps[step].arg1; - - /* find position in force_context/hierarchy */ - while (force_pos >= 0 && - force_context[force_pos] != w[0]) - force_pos--; - /* and move past it */ - if (force_pos >= 0) - force_pos--; - + w[0] = curstep->arg1; wsize = 1; break; case CRUSH_RULE_CHOOSE_LEAF_FIRSTN: case CRUSH_RULE_CHOOSE_FIRSTN: firstn = 1; + /* fall through */ case CRUSH_RULE_CHOOSE_LEAF_INDEP: case CRUSH_RULE_CHOOSE_INDEP: - BUG_ON(wsize == 0); + if (wsize == 0) + break; recurse_to_leaf = - rule->steps[step].op == + curstep->op == CRUSH_RULE_CHOOSE_LEAF_FIRSTN || - rule->steps[step].op == + curstep->op == CRUSH_RULE_CHOOSE_LEAF_INDEP; /* reset output */ @@ -544,32 +529,18 @@ int crush_do_rule(struct crush_map *map, * basically, numrep <= 0 means relative to * the provided result_max */ - numrep = rule->steps[step].arg1; + numrep = curstep->arg1; if (numrep <= 0) { numrep += result_max; if (numrep <= 0) continue; } j = 0; - if (osize == 0 && force_pos >= 0) { - /* skip any intermediate types */ - while (force_pos && - force_context[force_pos] < 0 && - rule->steps[step].arg2 != - map->buckets[-1 - - force_context[force_pos]]->type) - force_pos--; - o[osize] = force_context[force_pos]; - if (recurse_to_leaf) - c[osize] = force_context[0]; - j++; - force_pos--; - } osize += 
crush_choose(map, map->buckets[-1-w[i]], weight, x, numrep, - rule->steps[step].arg2, + curstep->arg2, o+osize, j, firstn, recurse_to_leaf, c+osize); @@ -596,7 +567,9 @@ break; default: - BUG_ON(1); + dprintk(" unknown op %d at step %d\n", + curstep->op, step); + break; } } return result_len; diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 27d4ea315d1..54b531a0112 100644 --- a/net/ceph/debugfs.c +++ b/
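The crush/mapper.c changes above harden CRUSH evaluation against bad maps: BUG_ON() assertions in crush_do_rule() and crush_choose() become dprintk() diagnostics with graceful fallbacks, the unused force argument is dropped, and the map and weight arguments become const. A minimal sketch of the resulting calling convention (the wrapper and its ruleset/type constants are illustrative, not from this patch; the crush_find_rule() and crush_do_rule() signatures match the hunks above):

	/* Illustrative placement wrapper over the reworked mapper API. */
	#include <linux/errno.h>
	#include <linux/crush/crush.h>
	#include <linux/crush/mapper.h>

	static int place_object(const struct crush_map *map, const __u32 *weights,
				int x, int *osds, int max_osds)
	{
		/* ruleset 0, pg type 1 (replicated): illustrative values */
		int ruleno = crush_find_rule(map, 0, 1, max_osds);

		if (ruleno < 0)
			return -ENOENT;
		/* returns how many OSD ids were written into osds[] */
		return crush_do_rule(map, ruleno, x, osds, max_osds, weights);
	}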