forked from freifunk-franken/firmware
diff --git a/Makefile b/Makefile
index 5762721..eac6bfc 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,11 @@
# 02110-1301, USA
#

-
+# changing the CONFIG_* line to 'y' enables the related feature
+# B.A.T.M.A.N. debugging:
+export CONFIG_BATMAN_ADV_DEBUG=n
+# B.A.T.M.A.N. bridge loop avoidance:
+export CONFIG_BATMAN_ADV_BLA=y

PWD:=$(shell pwd)
KERNELPATH ?= /lib/modules/$(shell uname -r)/build
@@ -28,17 +32,30 @@ $(warning $(KERNELPATH) is missing, please set KERNELPATH)
endif

export KERNELPATH
+RM ?= rm -f

-REVISION= $(shell if [ -d .git ]; then \
- echo $$(git describe --always --dirty --match "v*" |sed 's/^v//' 2> /dev/null || echo "[unknown]"); \
+REVISION= $(shell if [ -d "$(PWD)/.git" ]; then \
+ echo $$(git --git-dir="$(PWD)/.git" describe --always --dirty --match "v*" |sed 's/^v//' 2> /dev/null || echo "[unknown]"); \
fi)

-NUM_CPUS = $(shell nproc 2> /dev/null || echo 1)
-
+CONFIG_BATMAN_ADV=m
+batman-adv-y += compat.o
+ifneq ($(REVISION),)
+ccflags-y += -DSOURCE_VERSION=\"$(REVISION)\"
+endif
include $(PWD)/Makefile.kbuild

-all:
- $(MAKE) -C $(KERNELPATH) REVISION=$(REVISION) M=$(PWD) PWD=$(PWD) -j $(NUM_CPUS) modules
+all: config
+ $(MAKE) -C $(KERNELPATH) M=$(PWD) PWD=$(PWD) modules

clean:
+ $(RM) compat-autoconf.h*
$(MAKE) -C $(KERNELPATH) M=$(PWD) PWD=$(PWD) clean
+
+install: config
+ $(MAKE) -C $(KERNELPATH) M=$(PWD) PWD=$(PWD) INSTALL_MOD_DIR=kernel/net/batman-adv/ modules_install
+
+config:
+ $(PWD)/gen-compat-autoconf.sh $(PWD)/compat-autoconf.h
+
+.PHONY: all clean install config
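The new "config" target above runs gen-compat-autoconf.sh before every build, turning the exported CONFIG_* variables into a compat-autoconf.h header that the C code can test with #ifdef. The script's exact output is not part of this diff, so the following is only a hypothetical sketch of what such a generated header could look like:

	/* compat-autoconf.h - generated by gen-compat-autoconf.sh (sketch) */
	#undef CONFIG_BATMAN_ADV_DEBUG		/* CONFIG_BATMAN_ADV_DEBUG=n */
	#define CONFIG_BATMAN_ADV_BLA 1		/* CONFIG_BATMAN_ADV_BLA=y  */

This is why "make clean" now also removes compat-autoconf.h*: the header is a build product, not a source file.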
diff --git a/Makefile.kbuild b/Makefile.kbuild
index 6377c17..525df15 100644
--- a/Makefile.kbuild
+++ b/Makefile.kbuild
@@ -18,24 +18,12 @@
# 02110-1301, USA
#

-
-
-# openwrt integration
-ifeq ($(MAKING_MODULES),1)
--include $(TOPDIR)/Rules.make
-endif
-
-# EXTRA_CFLAGS += -DCONFIG_BATMAN_ADV_DEBUG
-
-ifneq ($(REVISION),)
-EXTRA_CFLAGS += -DSOURCE_VERSION=\"$(REVISION)\"
-endif
-
-obj-m += batman-adv.o
+obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
batman-adv-y += bat_debugfs.o
batman-adv-y += bat_iv_ogm.o
batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
@@ -50,4 +38,3 @@ batman-adv-y += soft-interface.o
batman-adv-y += translation-table.o
batman-adv-y += unicast.o
batman-adv-y += vis.o
-batman-adv-y += compat.o
diff --git a/README b/README
index c50cc0a..0c7a3c8 100644
--- a/README
+++ b/README
@@ -1,5 +1,3 @@
-[state: 13-11-2011]
-
BATMAN-ADV
----------

@@ -17,22 +15,7 @@ are: IPv4, IPv6, DHCP, IPX.
Batman advanced was implemented as a Linux kernel driver to re-
duce the overhead to a minimum. It does not depend on any (other)
network driver, and can be used on wifi as well as ethernet lan,
-vpn, etc ... (anything with ethernet-style layer 2). It compiles
-against and should work with Linux 2.6.29 - 3.2. Supporting
-older versions is not planned, but it's probably easy to backport
-it. If you work on a backport, feel free to contact us. :-)
-
-
-COMPILE
--------
-
-To compile against your currently installed kernel, just type:
-
-# make
-
-if you want to compile against some other kernel, use:
-
-# make KERNELPATH=/path/to/kernel
+vpn, etc ... (anything with ethernet-style layer 2).


CONFIGURATION
@@ -82,17 +65,18 @@ To deactivate an interface you have to write "none" into its
All mesh wide settings can be found in batman's own interface
folder:

-# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms fragmentation gw_sel_class vis_mode
-# ap_isolation gw_bandwidth hop_penalty
-# bonding gw_mode orig_interval
+# ls /sys/class/net/bat0/mesh/
+# aggregated_ogms fragmentation hop_penalty
+# ap_isolation gw_bandwidth log_level
+# bonding gw_mode orig_interval
+# bridge_loop_avoidance gw_sel_class vis_mode
+

There is a special folder for debugging information:

# ls /sys/kernel/debug/batman_adv/bat0/
-# gateways socket transtable_global vis_data
-# originators softif_neigh transtable_local
-
+# bla_claim_table log socket transtable_local
+# gateways originators transtable_global vis_data

Some of the files contain all sort of status information regard-
ing the mesh network. For example, you can view the table of
@@ -202,11 +186,7 @@ When investigating problems with your mesh network it is some-
times necessary to see more detail debug messages. This must be
enabled when compiling the batman-adv module. When building bat-
man-adv as part of kernel, use "make menuconfig" and enable the
-option "B.A.T.M.A.N. debugging". When compiling outside of the
-kernel tree it is necessary to edit the file Makefile.kbuild and
-uncomment the line
-
-#EXTRA_CFLAGS += -DCONFIG_BATMAN_ADV_DEBUG
+option "B.A.T.M.A.N. debugging".

Those additional debug messages can be accessed using a special
file in debugfs
@@ -220,12 +200,13 @@ abled during run time. Following log_levels are defined:
1 - Enable messages related to routing / flooding / broadcasting
2 - Enable messages related to route added / changed / deleted
4 - Enable messages related to translation table operations
-7 - Enable all messages
+8 - Enable messages related to bridge loop avoidance
+15 - enable all messages

The debug output can be changed at runtime using the file
/sys/class/net/bat0/mesh/log_level. e.g.

-# echo 2 > /sys/class/net/bat0/mesh/log_level
+# echo 6 > /sys/class/net/bat0/mesh/log_level

will enable debug messages for when routes change.
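The log_level values above form a bitmask, so message classes combine by adding their values: the "echo 6" in the hunk enables the route-change (2) and translation-table (4) messages together, and 15 is simply 1+2+4+8. For example, to watch routing decisions together with the new bridge loop avoidance messages:

# echo 9 > /sys/class/net/bat0/mesh/log_level

(1 for routing/flooding/broadcasting plus 8 for bridge loop avoidance.)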
diff --git a/README.external b/README.external
new file mode 100644
index 0000000..4a3a504
--- /dev/null
+++ b/README.external
@@ -0,0 +1,46 @@
+BATMAN-ADV external module
+--------------------------
+
+The batman-adv module is shipped as part of the Linux kernel
+and as an external module. The external module allows you to get
+new features without upgrading to a newer kernel version
+and to get batman-adv specific bugfixes for kernels that are
+not supported anymore. It compiles against and should work
+with Linux 2.6.29 - 3.2. Supporting older versions is not
+planned, but it's probably easy to backport it. If you work on a
+backport, feel free to contact us. :-)
+
+COMPILE
+-------
+
+To compile against your currently installed kernel, just type:
+
+# make
+
+if you want to compile against some other kernel, use:
+
+# make KERNELPATH=/path/to/kernel
+
+if you want to install this module:
+
+# sudo make install
+
+CONFIGURATION
+-------------
+
+The in-kernel module can be configured through
+menuconfig. When compiling outside of the kernel tree, it is
+necessary to configure it using the make options. Each
+option can be set to y (enabled), n (disabled) or m (build as
+module). Available options and their possible values are
+(default marked with an "*")
+
+ * CONFIG_BATMAN_ADV_DEBUG=[y|n*] (B.A.T.M.A.N. debugging)
+ * CONFIG_BATMAN_ADV_BLA=[y*|n] (B.A.T.M.A.N. bridge loop avoidance)
+
+e.g., debugging can be enabled by
+
+# make CONFIG_BATMAN_ADV_DEBUG=y
+
+Keep in mind that all options must also be added to the "make
+install" call.
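Since the external module is configured purely through make variables, a build-and-install cycle with non-default options has to repeat them on both invocations, e.g.:

# make CONFIG_BATMAN_ADV_DEBUG=y CONFIG_BATMAN_ADV_BLA=y
# sudo make install CONFIG_BATMAN_ADV_DEBUG=y CONFIG_BATMAN_ADV_BLA=y

Presumably this is because the "install" target depends on "config" (see the Makefile hunk above), so an install call without the options would regenerate compat-autoconf.h with the defaults.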
diff --git a/bat_algo.h b/bat_algo.h
new file mode 100644
index 0000000..755379f
--- /dev/null
+++ b/bat_algo.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
+#define _NET_BATMAN_ADV_BAT_ALGO_H_
+
+int bat_iv_init(void);
+
+#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/bat_debugfs.c b/bat_debugfs.c
index d0af9bf..fe179fe 100644
--- a/bat_debugfs.c
+++ b/bat_debugfs.c
@@ -32,6 +32,7 @@
#include "soft-interface.h"
#include "vis.h"
#include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"

static struct dentry *bat_debugfs;

@@ -221,6 +222,11 @@ static void debug_log_cleanup(struct bat_priv *bat_priv)
}
#endif

+static int bat_algorithms_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bat_algo_seq_print_text, NULL);
+}
+
static int originators_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -233,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file)
return single_open(file, gw_client_seq_print_text, net_dev);
}

-static int softif_neigh_open(struct inode *inode, struct file *file)
+static int transtable_global_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, softif_neigh_seq_print_text, net_dev);
+ return single_open(file, tt_global_seq_print_text, net_dev);
}

-static int transtable_global_open(struct inode *inode, struct file *file)
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int bla_claim_table_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, tt_global_seq_print_text, net_dev);
+ return single_open(file, bla_claim_table_seq_print_text, net_dev);
}
+#endif

static int transtable_local_open(struct inode *inode, struct file *file)
{
@@ -274,18 +282,23 @@ struct bat_debuginfo bat_debuginfo_##_name = { \
} \
};

+static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
+#endif
static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);

static struct bat_debuginfo *mesh_debuginfos[] = {
&bat_debuginfo_originators,
&bat_debuginfo_gateways,
- &bat_debuginfo_softif_neigh,
&bat_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &bat_debuginfo_bla_claim_table,
+#endif
&bat_debuginfo_transtable_local,
&bat_debuginfo_vis_data,
NULL,
@@ -293,9 +306,25 @@ static struct bat_debuginfo *mesh_debuginfos[] = {

void debugfs_init(void)
{
+ struct bat_debuginfo *bat_debug;
+ struct dentry *file;
+
bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
if (bat_debugfs == ERR_PTR(-ENODEV))
bat_debugfs = NULL;
+
+ if (!bat_debugfs)
+ goto out;
+
+ bat_debug = &bat_debuginfo_routing_algos;
+ file = debugfs_create_file(bat_debug->attr.name,
+ S_IFREG | bat_debug->attr.mode,
+ bat_debugfs, NULL, &bat_debug->fops);
+ if (!file)
+ pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
+
+out:
+ return;
}

void debugfs_destroy(void)
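The hunk above registers routing_algos directly in the batman_adv debugfs root rather than per soft-interface (it is created in debugfs_init() with a NULL private pointer, unlike the per-bat0 files). Assuming debugfs is mounted in the usual place, the registered algorithms can then presumably be listed with:

# cat /sys/kernel/debug/batman_adv/routing_algos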
diff --git a/bat_iv_ogm.c b/bat_iv_ogm.c
index 3512e25..1c483a5 100644
--- a/bat_iv_ogm.c
+++ b/bat_iv_ogm.c
@@ -20,7 +20,6 @@
*/

#include "main.h"
-#include "bat_ogm.h"
#include "translation-table.h"
#include "ring_buffer.h"
#include "originator.h"
@@ -29,8 +28,9 @@
#include "gateway_client.h"
#include "hard-interface.h"
#include "send.h"
+#include "bat_algo.h"

-void bat_ogm_init(struct hard_iface *hard_iface)
+static void bat_iv_ogm_init(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;

@@ -38,25 +38,25 @@ void bat_ogm_init(struct hard_iface *hard_iface)
hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->packet_type = BAT_OGM;
- batman_ogm_packet->version = COMPAT_VERSION;
+ batman_ogm_packet->header.packet_type = BAT_OGM;
+ batman_ogm_packet->header.version = COMPAT_VERSION;
+ batman_ogm_packet->header.ttl = 2;
batman_ogm_packet->flags = NO_FLAGS;
- batman_ogm_packet->ttl = 2;
batman_ogm_packet->tq = TQ_MAX_VALUE;
batman_ogm_packet->tt_num_changes = 0;
batman_ogm_packet->ttvn = 0;
}

-void bat_ogm_init_primary(struct hard_iface *hard_iface)
+static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;

batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
- batman_ogm_packet->ttl = TTL;
+ batman_ogm_packet->header.ttl = TTL;
}

-void bat_ogm_update_mac(struct hard_iface *hard_iface)
+static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;

@@ -68,7 +68,7 @@ void bat_ogm_update_mac(struct hard_iface *hard_iface)
}

/* when do we schedule our own ogm to be sent */
-static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
+static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
return jiffies + msecs_to_jiffies(
atomic_read(&bat_priv->orig_interval) -
@@ -76,7 +76,7 @@ static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
}

/* when do we schedule a ogm packet to be sent */
-static unsigned long bat_ogm_fwd_send_time(void)
+static unsigned long bat_iv_ogm_fwd_send_time(void)
{
return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
@@ -89,8 +89,8 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
}

/* is there another aggregated packet here? */
-static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
- int tt_num_changes)
+static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ int tt_num_changes)
{
int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);

@@ -99,8 +99,8 @@ static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
}

/* send a batman ogm to a given interface */
-static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
- struct hard_iface *hard_iface)
+static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
+ struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
char *fwd_str;
@@ -117,8 +117,8 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

/* adjust all flags and log packets */
- while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batman_ogm_packet->tt_num_changes)) {
+ while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batman_ogm_packet->tt_num_changes)) {

/* we might have aggregated direct link packets with an
* ordinary base packet */
@@ -137,7 +137,7 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->tq, batman_ogm_packet->ttl,
+ batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
(batman_ogm_packet->flags & DIRECTLINK ?
"on" : "off"),
batman_ogm_packet->ttvn, hard_iface->net_dev->name,
@@ -157,7 +157,7 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
}

/* send a batman ogm packet */
-void bat_ogm_emit(struct forw_packet *forw_packet)
+static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
{
struct hard_iface *hard_iface;
struct net_device *soft_iface;
@@ -188,7 +188,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)

/* multihomed peer assumed */
/* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_ogm_packet->ttl == 1)) ||
+ if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {

/* FIXME: what about aggregated packets ? */
@@ -198,7 +198,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
(forw_packet->own ? "Sending own" : "Forwarding"),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->ttl,
+ batman_ogm_packet->header.ttl,
forw_packet->if_incoming->net_dev->name,
forw_packet->if_incoming->net_dev->dev_addr);

@@ -216,7 +216,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
if (hard_iface->soft_iface != soft_iface)
continue;

- bat_ogm_send_to_if(forw_packet, hard_iface);
+ bat_iv_ogm_send_to_if(forw_packet, hard_iface);
}
rcu_read_unlock();

@@ -226,13 +226,13 @@ out:
}

/* return true if new_packet can be aggregated with forw_packet */
-static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
+static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
*new_batman_ogm_packet,
- struct bat_priv *bat_priv,
- int packet_len, unsigned long send_time,
- bool directlink,
- const struct hard_iface *if_incoming,
- const struct forw_packet *forw_packet)
+ struct bat_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct hard_iface *if_incoming,
+ const struct forw_packet *forw_packet)
{
struct batman_ogm_packet *batman_ogm_packet;
int aggregated_bytes = forw_packet->packet_len + packet_len;
@@ -272,7 +272,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
* are flooded through the net */
if ((!directlink) &&
(!(batman_ogm_packet->flags & DIRECTLINK)) &&
- (batman_ogm_packet->ttl != 1) &&
+ (batman_ogm_packet->header.ttl != 1) &&

/* own packets originating non-primary
* interfaces leave only that interface */
@@ -285,7 +285,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
/* if the incoming packet is sent via this one
* interface only - we still can aggregate */
if ((directlink) &&
- (new_batman_ogm_packet->ttl == 1) &&
+ (new_batman_ogm_packet->header.ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&

/* packets from direct neighbors or
@@ -306,11 +306,11 @@ out:
}

/* create a new aggregated packet and add this packet to it */
-static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
- int packet_len, unsigned long send_time,
- bool direct_link,
- struct hard_iface *if_incoming,
- int own_packet)
+static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct hard_iface *if_incoming,
+ int own_packet)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct forw_packet *forw_packet_aggr;
@@ -385,9 +385,9 @@ out:
}

/* aggregate a new packet into the existing ogm packet */
-static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
- const unsigned char *packet_buff,
- int packet_len, bool direct_link)
+static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
{
unsigned char *skb_buff;

@@ -402,10 +402,10 @@ static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
(1 << forw_packet_aggr->num_packets);
}

-static void bat_ogm_queue_add(struct bat_priv *bat_priv,
- unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming,
- int own_packet, unsigned long send_time)
+static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming,
+ int own_packet, unsigned long send_time)
{
/**
* _aggr -> pointer to the packet we want to aggregate with
@@ -425,11 +425,11 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
&bat_priv->forw_bat_list, list) {
- if (bat_ogm_can_aggregate(batman_ogm_packet,
- bat_priv, packet_len,
- send_time, direct_link,
- if_incoming,
- forw_packet_pos)) {
+ if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ forw_packet_pos)) {
forw_packet_aggr = forw_packet_pos;
break;
}
@@ -451,27 +451,27 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
(atomic_read(&bat_priv->aggregated_ogms)))
send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);

- bat_ogm_aggregate_new(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
+ bat_iv_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, own_packet);
} else {
- bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len,
- direct_link);
+ bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
+ packet_len, direct_link);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
}

-static void bat_ogm_forward(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- int directlink, struct hard_iface *if_incoming)
+static void bat_iv_ogm_forward(struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ int directlink, struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *router;
uint8_t in_tq, in_ttl, tq_avg = 0;
uint8_t tt_num_changes;

- if (batman_ogm_packet->ttl <= 1) {
+ if (batman_ogm_packet->header.ttl <= 1) {
bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -479,10 +479,10 @@ static void bat_ogm_forward(struct orig_node *orig_node,
router = orig_node_get_router(orig_node);

in_tq = batman_ogm_packet->tq;
- in_ttl = batman_ogm_packet->ttl;
+ in_ttl = batman_ogm_packet->header.ttl;
tt_num_changes = batman_ogm_packet->tt_num_changes;

- batman_ogm_packet->ttl--;
+ batman_ogm_packet->header.ttl--;
memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
@@ -494,7 +494,8 @@ static void bat_ogm_forward(struct orig_node *orig_node,
batman_ogm_packet->tq = router->tq_avg;

if (router->last_ttl)
- batman_ogm_packet->ttl = router->last_ttl - 1;
+ batman_ogm_packet->header.ttl =
+ router->last_ttl - 1;
}

tq_avg = router->tq_avg;
@@ -510,7 +511,7 @@ static void bat_ogm_forward(struct orig_node *orig_node,
"Forwarding packet: tq_orig: %i, tq_avg: %i, "
"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
- batman_ogm_packet->ttl);
+ batman_ogm_packet->header.ttl);

batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
@@ -522,12 +523,13 @@ static void bat_ogm_forward(struct orig_node *orig_node,
else
batman_ogm_packet->flags &= ~DIRECTLINK;

- bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
- BATMAN_OGM_LEN + tt_len(tt_num_changes),
- if_incoming, 0, bat_ogm_fwd_send_time());
+ bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
+ BATMAN_OGM_LEN + tt_len(tt_num_changes),
+ if_incoming, 0, bat_iv_ogm_fwd_send_time());
}

-void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
+static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
+ int tt_num_changes)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batman_ogm_packet *batman_ogm_packet;
@@ -564,21 +566,22 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
atomic_inc(&hard_iface->seqno);

slide_own_bcast_window(hard_iface);
- bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
- hard_iface->packet_len, hard_iface, 1,
- bat_ogm_emit_send_time(bat_priv));
+ bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+ hard_iface->packet_len, hard_iface, 1,
+ bat_iv_ogm_emit_send_time(bat_priv));

if (primary_if)
hardif_free_ref(primary_if);
}

-static void bat_ogm_orig_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
+static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
*batman_ogm_packet,
- struct hard_iface *if_incoming,
- const unsigned char *tt_buff, int is_duplicate)
+ struct hard_iface *if_incoming,
+ const unsigned char *tt_buff,
+ int is_duplicate)
{
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct neigh_node *router = NULL;
@@ -642,8 +645,8 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
spin_unlock_bh(&neigh_node->tq_lock);

if (!is_duplicate) {
- orig_node->last_ttl = batman_ogm_packet->ttl;
- neigh_node->last_ttl = batman_ogm_packet->ttl;
+ orig_node->last_ttl = batman_ogm_packet->header.ttl;
+ neigh_node->last_ttl = batman_ogm_packet->header.ttl;
}

bonding_candidate_add(orig_node, neigh_node);
@@ -683,7 +686,7 @@ update_tt:
/* I have to check for transtable changes only if the OGM has been
* sent through a primary interface */
if (((batman_ogm_packet->orig != ethhdr->h_source) &&
- (batman_ogm_packet->ttl > 2)) ||
+ (batman_ogm_packet->header.ttl > 2)) ||
(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
tt_update_orig(bat_priv, orig_node, tt_buff,
batman_ogm_packet->tt_num_changes,
@@ -713,10 +716,10 @@ out:
neigh_node_free_ref(router);
}

-static int bat_ogm_calc_tq(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_ogm_packet *batman_ogm_packet,
- struct hard_iface *if_incoming)
+static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ struct batman_ogm_packet *batman_ogm_packet,
+ struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -825,10 +828,10 @@ out:
* -1 the packet is old and has been received while the seqno window
* was protected. Caller should drop it.
*/
-static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
+static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
*batman_ogm_packet,
- const struct hard_iface *if_incoming)
+ const struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct orig_node *orig_node;
@@ -890,10 +893,10 @@ out:
return ret;
}

-static void bat_ogm_process(const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming)
+static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ const unsigned char *tt_buff,
+ struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct hard_iface *hard_iface;
@@ -918,7 +921,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batman_ogm_packet->packet_type != BAT_OGM)
+ if (batman_ogm_packet->header.packet_type != BAT_OGM)
return;

/* could be changed by schedule_own_packet() */
@@ -938,8 +941,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
- batman_ogm_packet->ttl, batman_ogm_packet->version,
- has_directlink_flag);
+ batman_ogm_packet->header.ttl,
+ batman_ogm_packet->header.version, has_directlink_flag);

rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -966,10 +969,10 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
}
rcu_read_unlock();

- if (batman_ogm_packet->version != COMPAT_VERSION) {
+ if (batman_ogm_packet->header.version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->version);
+ batman_ogm_packet->header.version);
return;
}

@@ -1031,8 +1034,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
if (!orig_node)
return;

- is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet,
- if_incoming);
+ is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
+ if_incoming);

if (is_duplicate == -1) {
bat_dbg(DBG_BATMAN, bat_priv,
@@ -1081,8 +1084,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
goto out_neigh;
}

- is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node,
- batman_ogm_packet, if_incoming);
+ is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
+ batman_ogm_packet, if_incoming);

bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);

@@ -1091,17 +1094,17 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
if (is_bidirectional &&
(!is_duplicate ||
((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl))))
- bat_ogm_orig_update(bat_priv, orig_node, ethhdr,
- batman_ogm_packet, if_incoming,
- tt_buff, is_duplicate);
+ (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
+ bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
+ batman_ogm_packet, if_incoming,
+ tt_buff, is_duplicate);

/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {

/* mark direct link on incoming interface */
- bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- 1, if_incoming);
+ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
+ 1, if_incoming);

bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
"rebroadcast neighbor packet with direct link flag\n");
@@ -1123,7 +1126,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,

bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
- bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming);
+ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
+ 0, if_incoming);

out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1139,13 +1143,17 @@ out:
orig_node_free_ref(orig_node);
}

-void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming)
+static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
+ struct sk_buff *skb)
{
struct batman_ogm_packet *batman_ogm_packet;
- int buff_pos = 0;
- unsigned char *tt_buff;
+ struct ethhdr *ethhdr;
+ int buff_pos = 0, packet_len;
+ unsigned char *tt_buff, *packet_buff;

+ packet_len = skb_headlen(skb);
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ packet_buff = skb->data;
batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;

/* unpack the aggregated packets and process them one by one */
@@ -1157,14 +1165,29 @@ void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,

tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;

- bat_ogm_process(ethhdr, batman_ogm_packet,
- tt_buff, if_incoming);
+ bat_iv_ogm_process(ethhdr, batman_ogm_packet,
+ tt_buff, if_incoming);

buff_pos += BATMAN_OGM_LEN +
tt_len(batman_ogm_packet->tt_num_changes);

batman_ogm_packet = (struct batman_ogm_packet *)
(packet_buff + buff_pos);
- } while (bat_ogm_aggr_packet(buff_pos, packet_len,
- batman_ogm_packet->tt_num_changes));
+ } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
+ batman_ogm_packet->tt_num_changes));
+}
+
+static struct bat_algo_ops batman_iv __read_mostly = {
+ .name = "BATMAN IV",
+ .bat_ogm_init = bat_iv_ogm_init,
+ .bat_ogm_init_primary = bat_iv_ogm_init_primary,
+ .bat_ogm_update_mac = bat_iv_ogm_update_mac,
+ .bat_ogm_schedule = bat_iv_ogm_schedule,
+ .bat_ogm_emit = bat_iv_ogm_emit,
+ .bat_ogm_receive = bat_iv_ogm_receive,
+};
+
+int __init bat_iv_init(void)
+{
+ return bat_algo_register(&batman_iv);
}
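With the bat_algo_ops table above, the formerly global bat_ogm_*() entry points become private to B.A.T.M.A.N. IV and are reached only through the registered operations. A second routing algorithm would plug into the same six hooks. A minimal hypothetical sketch (the function body and the name "BATMAN V-SKETCH" are invented for illustration; only the field names and bat_algo_register() come from this diff):

	/* sketch of registering an alternative algorithm */
	static void my_ogm_init(struct hard_iface *hard_iface)
	{
		/* prepare hard_iface->packet_buff for this algorithm's
		 * own OGM format */
	}

	/* ...the other five hooks would be filled in the same way... */

	static struct bat_algo_ops my_algo __read_mostly = {
		.name = "BATMAN V-SKETCH",
		.bat_ogm_init = my_ogm_init,
		/* .bat_ogm_init_primary, .bat_ogm_update_mac,
		 * .bat_ogm_schedule, .bat_ogm_emit, .bat_ogm_receive */
	};

	int __init my_algo_init(void)
	{
		return bat_algo_register(&my_algo);
	}

The .name string is what later shows up in the routing_algo sysfs attribute and in the routing_algos debugfs listing.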
diff --git a/bat_ogm.h b/bat_ogm.h
deleted file mode 100644
index 69329c1..0000000
--- a/bat_ogm.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_OGM_H_
-#define _NET_BATMAN_ADV_OGM_H_
-
-#include "main.h"
-
-void bat_ogm_init(struct hard_iface *hard_iface);
-void bat_ogm_init_primary(struct hard_iface *hard_iface);
-void bat_ogm_update_mac(struct hard_iface *hard_iface);
-void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
-void bat_ogm_emit(struct forw_packet *forw_packet);
-void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming);
-
-#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/bat_sysfs.c b/bat_sysfs.c
index b8a7414..fd4f786 100644
--- a/bat_sysfs.c
+++ b/bat_sysfs.c
@@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count,
unsigned long uint_val;
int ret;

- ret = strict_strtoul(buff, 10, &uint_val);
+ ret = kstrtoul(buff, 10, &uint_val);
if (ret) {
bat_info(net_dev,
"%s: Invalid parameter received: %s\n",
@@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
unsigned long val;
int ret, vis_mode_tmp = -1;

- ret = strict_strtoul(buff, 10, &val);
+ ret = kstrtoul(buff, 10, &val);

if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
(strncmp(buff, "client", 6) == 0) ||
@@ -272,6 +272,13 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
return count;
}

+static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
+ return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
+}
+
static void post_gw_deselect(struct net_device *net_dev)
{
struct bat_priv *bat_priv = netdev_priv(net_dev);
@@ -379,9 +386,13 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,

BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
+static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
@@ -390,15 +401,19 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
+BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
#endif

static struct bat_attribute *mesh_attrs[] = {
&bat_attr_aggregated_ogms,
&bat_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &bat_attr_bridge_loop_avoidance,
+#endif
&bat_attr_fragmentation,
&bat_attr_ap_isolation,
&bat_attr_vis_mode,
+ &bat_attr_routing_algo,
&bat_attr_gw_mode,
&bat_attr_orig_interval,
&bat_attr_hop_penalty,
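Two separate things happen in this file: the deprecated strict_strtoul() calls are replaced with kstrtoul() (same semantics, the name the kernel has since standardized on), and a read-only routing_algo attribute is added that simply prints bat_algo_ops->name. On a running node the active algorithm can then presumably be read like this (output assumed from the .name field registered above):

# cat /sys/class/net/bat0/mesh/routing_algo
# BATMAN IV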
diff --git a/bitarray.c b/bitarray.c
index 0be9ff3..9bc63b2 100644
--- a/bitarray.c
+++ b/bitarray.c
@@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
/* sequence number is much newer, probably missed a lot of packets */

if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
- || (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+ && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
bat_dbg(DBG_BATMAN, bat_priv,
"We missed a lot of packets (%i) !\n",
seq_num_diff - 1);
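The operator change above is a real logic fix, not a rename: assuming TQ_LOCAL_WINDOW_SIZE is smaller than EXPECTED_SEQNO_RANGE (it is in batman-adv), every possible seq_num_diff satisfies at least one of the two conditions, so with || the "missed a lot of packets" branch was taken unconditionally. With && the branch is limited to sequence numbers that jumped ahead of the window but still lie inside the plausible range:

	/* intended reading of the fixed check (names from the hunk above) */
	if (seq_num_diff >= TQ_LOCAL_WINDOW_SIZE &&
	    seq_num_diff < EXPECTED_SEQNO_RANGE) {
		/* much newer, but plausible: restart the seqno window */
	}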
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c
|
|
new file mode 100644
|
|
index 0000000..99b0a8f
|
|
--- /dev/null
|
|
+++ b/bridge_loop_avoidance.c
|
|
@@ -0,0 +1,1594 @@
|
|
+/*
|
|
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
|
|
+ *
|
|
+ * Simon Wunderlich
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of version 2 of the GNU General Public
|
|
+ * License as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but
|
|
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
|
+ * 02110-1301, USA
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "main.h"
|
|
+#include "hash.h"
|
|
+#include "hard-interface.h"
|
|
+#include "originator.h"
|
|
+#include "bridge_loop_avoidance.h"
|
|
+#include "translation-table.h"
|
|
+#include "send.h"
|
|
+
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/crc16.h>
|
|
+#include <linux/if_arp.h>
|
|
+#include <net/arp.h>
|
|
+#include <linux/if_vlan.h>
|
|
+
|
|
+static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
|
|
+
|
|
+static void bla_periodic_work(struct work_struct *work);
|
|
+static void bla_send_announce(struct bat_priv *bat_priv,
|
|
+ struct backbone_gw *backbone_gw);
|
|
+
|
|
+/* return the index of the claim */
|
|
+static inline uint32_t choose_claim(const void *data, uint32_t size)
|
|
+{
|
|
+ const unsigned char *key = data;
|
|
+ uint32_t hash = 0;
|
|
+ size_t i;
|
|
+
|
|
+ for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
|
|
+ hash += key[i];
|
|
+ hash += (hash << 10);
|
|
+ hash ^= (hash >> 6);
|
|
+ }
|
|
+
|
|
+ hash += (hash << 3);
|
|
+ hash ^= (hash >> 11);
|
|
+ hash += (hash << 15);
|
|
+
|
|
+ return hash % size;
|
|
+}
|
|
+
|
|
+/* return the index of the backbone gateway */
|
|
+static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
|
|
+{
|
|
+ const unsigned char *key = data;
|
|
+ uint32_t hash = 0;
|
|
+ size_t i;
|
|
+
|
|
+ for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
|
|
+ hash += key[i];
|
|
+ hash += (hash << 10);
|
|
+ hash ^= (hash >> 6);
|
|
+ }
|
|
+
|
|
+ hash += (hash << 3);
|
|
+ hash ^= (hash >> 11);
|
|
+ hash += (hash << 15);
|
|
+
|
|
+ return hash % size;
|
|
+}
|
|
+
|
|
+
|
|
+/* compares address and vid of two backbone gws */
|
|
+static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
|
|
+{
|
|
+ const void *data1 = container_of(node, struct backbone_gw,
|
|
+ hash_entry);
|
|
+
|
|
+ return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
|
|
+}
|
|
+
|
|
+/* compares address and vid of two claims */
|
|
+static int compare_claim(const struct hlist_node *node, const void *data2)
|
|
+{
|
|
+ const void *data1 = container_of(node, struct claim,
|
|
+ hash_entry);
|
|
+
|
|
+ return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
|
|
+}
|
|
+
|
|
+/* free a backbone gw */
|
|
+static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
|
|
+{
|
|
+ if (atomic_dec_and_test(&backbone_gw->refcount))
|
|
+ kfree_rcu(backbone_gw, rcu);
|
|
+}
|
|
+
|
|
+/* finally deinitialize the claim */
|
|
+static void claim_free_rcu(struct rcu_head *rcu)
|
|
+{
|
|
+ struct claim *claim;
|
|
+
|
|
+ claim = container_of(rcu, struct claim, rcu);
|
|
+
|
|
+ backbone_gw_free_ref(claim->backbone_gw);
|
|
+ kfree(claim);
|
|
+}
|
|
+
|
|
+/* free a claim, call claim_free_rcu if its the last reference */
|
|
+static void claim_free_ref(struct claim *claim)
|
|
+{
|
|
+ if (atomic_dec_and_test(&claim->refcount))
|
|
+ call_rcu(&claim->rcu, claim_free_rcu);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @bat_priv: the bat priv with all the soft interface information
|
|
+ * @data: search data (may be local/static data)
|
|
+ *
|
|
+ * looks for a claim in the hash, and returns it if found
|
|
+ * or NULL otherwise.
|
|
+ */
|
|
+
|
|
+static struct claim *claim_hash_find(struct bat_priv *bat_priv,
|
|
+ struct claim *data)
|
|
+{
|
|
+ struct hashtable_t *hash = bat_priv->claim_hash;
|
|
+ struct hlist_head *head;
|
|
+ struct hlist_node *node;
|
|
+ struct claim *claim;
|
|
+ struct claim *claim_tmp = NULL;
|
|
+ int index;
|
|
+
|
|
+ if (!hash)
|
|
+ return NULL;
|
|
+
|
|
+ index = choose_claim(data, hash->size);
|
|
+ head = &hash->table[index];
|
|
+
|
|
+ rcu_read_lock();
|
|
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
|
|
+ if (!compare_claim(&claim->hash_entry, data))
|
|
+ continue;
|
|
+
|
|
+ if (!atomic_inc_not_zero(&claim->refcount))
|
|
+ continue;
|
|
+
|
|
+ claim_tmp = claim;
|
|
+ break;
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ return claim_tmp;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @bat_priv: the bat priv with all the soft interface information
|
|
+ * @addr: the address of the originator
|
|
+ * @vid: the VLAN ID
|
|
+ *
|
|
+ * looks for a claim in the hash, and returns it if found
|
|
+ * or NULL otherwise.
|
|
+ */
|
|
+
|
|
+static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
|
|
+ uint8_t *addr, short vid)
|
|
+{
|
|
+ struct hashtable_t *hash = bat_priv->backbone_hash;
|
|
+ struct hlist_head *head;
|
|
+ struct hlist_node *node;
|
|
+ struct backbone_gw search_entry, *backbone_gw;
|
|
+ struct backbone_gw *backbone_gw_tmp = NULL;
|
|
+ int index;
|
|
+
|
|
+ if (!hash)
|
|
+ return NULL;
|
|
+
|
|
+ memcpy(search_entry.orig, addr, ETH_ALEN);
|
|
+ search_entry.vid = vid;
|
|
+
|
|
+ index = choose_backbone_gw(&search_entry, hash->size);
|
|
+ head = &hash->table[index];
|
|
+
|
|
+ rcu_read_lock();
|
|
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
|
|
+ if (!compare_backbone_gw(&backbone_gw->hash_entry,
|
|
+ &search_entry))
|
|
+ continue;
|
|
+
|
|
+ if (!atomic_inc_not_zero(&backbone_gw->refcount))
|
|
+ continue;
|
|
+
|
|
+ backbone_gw_tmp = backbone_gw;
|
|
+ break;
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ return backbone_gw_tmp;
|
|
+}
|
|
+
|
|
+/* delete all claims for a backbone */
|
|
+static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
|
|
+{
|
|
+ struct hashtable_t *hash;
|
|
+ struct hlist_node *node, *node_tmp;
|
|
+ struct hlist_head *head;
|
|
+ struct claim *claim;
|
|
+ int i;
|
|
+ spinlock_t *list_lock; /* protects write access to the hash lists */
|
|
+
|
|
+ hash = backbone_gw->bat_priv->claim_hash;
|
|
+ if (!hash)
|
|
+ return;
|
|
+
|
|
+ for (i = 0; i < hash->size; i++) {
|
|
+ head = &hash->table[i];
|
|
+ list_lock = &hash->list_locks[i];
|
|
+
|
|
+ spin_lock_bh(list_lock);
|
|
+ hlist_for_each_entry_safe(claim, node, node_tmp,
|
|
+ head, hash_entry) {
|
|
+
|
|
+ if (claim->backbone_gw != backbone_gw)
|
|
+ continue;
|
|
+
|
|
+ claim_free_ref(claim);
|
|
+ hlist_del_rcu(node);
|
|
+ }
|
|
+ spin_unlock_bh(list_lock);
|
|
+ }
|
|
+
|
|
+ /* all claims gone, intialize CRC */
|
|
+ backbone_gw->crc = BLA_CRC_INIT;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @bat_priv: the bat priv with all the soft interface information
|
|
+ * @orig: the mac address to be announced within the claim
|
|
+ * @vid: the VLAN ID
|
|
+ * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
|
|
+ *
|
|
+ * sends a claim frame according to the provided info.
|
|
+ */
|
|
+
|
|
+static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
|
|
+ short vid, int claimtype)
|
|
+{
|
|
+ struct sk_buff *skb;
|
|
+ struct ethhdr *ethhdr;
|
|
+ struct hard_iface *primary_if;
|
|
+ struct net_device *soft_iface;
|
|
+ uint8_t *hw_src;
|
|
+ struct bla_claim_dst local_claim_dest;
|
|
+ uint32_t zeroip = 0;
|
|
+
|
|
+ primary_if = primary_if_get_selected(bat_priv);
|
|
+ if (!primary_if)
|
|
+ return;
|
|
+
|
|
+ memcpy(&local_claim_dest, &bat_priv->claim_dest,
|
|
+ sizeof(local_claim_dest));
|
|
+ local_claim_dest.type = claimtype;
|
|
+
|
|
+ soft_iface = primary_if->soft_iface;
|
|
+
|
|
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
|
|
+ /* IP DST: 0.0.0.0 */
|
|
+ zeroip,
|
|
+ primary_if->soft_iface,
|
|
+ /* IP SRC: 0.0.0.0 */
|
|
+ zeroip,
|
|
+ /* Ethernet DST: Broadcast */
|
|
+ NULL,
|
|
+ /* Ethernet SRC/HW SRC: originator mac */
|
|
+ primary_if->net_dev->dev_addr,
|
|
+ /* HW DST: FF:43:05:XX:00:00
|
|
+ * with XX = claim type
|
|
+ * and YY:YY = group id */
|
|
+ (uint8_t *)&local_claim_dest);
|
|
+
|
|
+ if (!skb)
|
|
+ goto out;
|
|
+
|
|
+ ethhdr = (struct ethhdr *)skb->data;
|
|
+ hw_src = (uint8_t *) ethhdr +
|
|
+ sizeof(struct ethhdr) +
|
|
+ sizeof(struct arphdr);
|
|
+
|
|
+ /* now we pretend that the client would have sent this ... */
|
|
+ switch (claimtype) {
|
|
+ case CLAIM_TYPE_ADD:
|
|
+ /*
|
|
+ * normal claim frame
|
|
+ * set Ethernet SRC to the clients mac
|
|
+ */
|
|
+ memcpy(ethhdr->h_source, mac, ETH_ALEN);
|
|
+ bat_dbg(DBG_BLA, bat_priv,
|
|
+ "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
|
|
+ break;
|
|
+ case CLAIM_TYPE_DEL:
|
|
+ /*
|
|
+ * unclaim frame
|
|
+ * set HW SRC to the clients mac
|
|
+ */
|
|
+ memcpy(hw_src, mac, ETH_ALEN);
|
|
+ bat_dbg(DBG_BLA, bat_priv,
|
|
+ "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
|
|
+ break;
|
|
+ case CLAIM_TYPE_ANNOUNCE:
|
|
+ /*
|
|
+ * announcement frame
|
|
+ * set HW SRC to the special mac containg the crc
|
|
+ */
|
|
+ memcpy(hw_src, mac, ETH_ALEN);
|
|
+ bat_dbg(DBG_BLA, bat_priv,
|
|
+ "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
|
|
+ ethhdr->h_source, vid);
|
|
+ break;
|
|
+ case CLAIM_TYPE_REQUEST:
|
|
+ /*
|
|
+ * request frame
|
|
+ * set HW SRC to the special mac containg the crc
|
|
+ */
|
|
+ memcpy(hw_src, mac, ETH_ALEN);
|
|
+ memcpy(ethhdr->h_dest, mac, ETH_ALEN);
|
|
+ bat_dbg(DBG_BLA, bat_priv,
|
|
+ "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
|
|
+ ethhdr->h_source, ethhdr->h_dest, vid);
|
|
+ break;
|
|
+
|
|
+ }
|
|
+
|
|
+ if (vid != -1)
|
|
+ skb = vlan_insert_tag(skb, vid);
|
|
+
|
|
+ skb_reset_mac_header(skb);
|
|
+ skb->protocol = eth_type_trans(skb, soft_iface);
|
|
+ bat_priv->stats.rx_packets++;
|
|
+ bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
|
|
+ soft_iface->last_rx = jiffies;
|
|
+
|
|
+ netif_rx(skb);
|
|
+out:
|
|
+ if (primary_if)
|
|
+ hardif_free_ref(primary_if);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @bat_priv: the bat priv with all the soft interface information
|
|
+ * @orig: the mac address of the originator
|
|
+ * @vid: the VLAN ID
|
|
+ *
|
|
+ * searches for the backbone gw or creates a new one if it could not
|
|
+ * be found.
|
|
+ */
|
|
+
|
|
+static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
|
|
+ uint8_t *orig, short vid)
|
|
+{
|
|
+ struct backbone_gw *entry;
|
|
+ struct orig_node *orig_node;
|
|
+ int hash_added;
|
|
+
|
|
+ entry = backbone_hash_find(bat_priv, orig, vid);
|
|
+
|
|
+ if (entry)
|
|
+ return entry;
|
|
+
|
|
+ bat_dbg(DBG_BLA, bat_priv,
|
|
+ "bla_get_backbone_gw(): not found (%pM, %d),"
|
|
+ " creating new entry\n", orig, vid);
|
|
+
|
|
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
|
|
+ if (!entry)
|
|
+ return NULL;
|
|
+
|
|
+ entry->vid = vid;
|
|
+ entry->lasttime = jiffies;
|
|
+ entry->crc = BLA_CRC_INIT;
|
|
+ entry->bat_priv = bat_priv;
|
|
+ atomic_set(&entry->request_sent, 0);
|
|
+ memcpy(entry->orig, orig, ETH_ALEN);
|
|
+
|
|
+ /* one for the hash, one for returning */
|
|
+ atomic_set(&entry->refcount, 2);
|
|
+
|
|
+ hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
|
|
+ choose_backbone_gw, entry, &entry->hash_entry);
|
|
+
|
|
+ if (unlikely(hash_added != 0)) {
|
|
+ /* hash failed, free the structure */
|
|
+ kfree(entry);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ /* this is a gateway now, remove any tt entries */
|
|
+ orig_node = orig_hash_find(bat_priv, orig);
|
|
+ if (orig_node) {
|
|
+ tt_global_del_orig(bat_priv, orig_node,
|
|
+ "became a backbone gateway");
|
|
+ orig_node_free_ref(orig_node);
|
|
+ }
|
|
+ return entry;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * update or add the own backbone gw to make sure we announce
|
|
+ * where we receive other backbone gws
|
|
+ */
|
|
+static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
|
|
+ struct hard_iface *primary_if,
|
|
+ short vid)
|
|
+{
|
|
+ struct backbone_gw *backbone_gw;
|
|
+
|
|
+ backbone_gw = bla_get_backbone_gw(bat_priv,
|
|
+ primary_if->net_dev->dev_addr, vid);
|
|
+ if (unlikely(!backbone_gw))
|
|
+ return;
|
|
+
|
|
+ backbone_gw->lasttime = jiffies;
|
|
+ backbone_gw_free_ref(backbone_gw);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @bat_priv: the bat priv with all the soft interface information
|
|
+ * @vid: the vid where the request came on
|
|
+ *
|
|
+ * Repeat all of our own claims, and finally send an ANNOUNCE frame
|
|
+ * to allow the requester another check if the CRC is correct now.
|
|
+ */
|
|
+
|
|
+static void bla_answer_request(struct bat_priv *bat_priv,
|
|
+ struct hard_iface *primary_if, short vid)
|
|
+{
|
|
+ struct hlist_node *node;
|
|
+ struct hlist_head *head;
|
|
+ struct hashtable_t *hash;
|
|
+ struct claim *claim;
|
|
+ struct backbone_gw *backbone_gw;
|
|
+ int i;
|
|
+
|
|
+ bat_dbg(DBG_BLA, bat_priv,
|
|
+ "bla_answer_request(): received a "
|
|
+ "claim request, send all of our own claims again\n");
|
|
+
|
|
+ backbone_gw = backbone_hash_find(bat_priv,
|
|
+ primary_if->net_dev->dev_addr, vid);
|
|
+ if (!backbone_gw)
|
|
+ return;
|
|
+
|
|
+ hash = bat_priv->claim_hash;
|
|
+ for (i = 0; i < hash->size; i++) {
|
|
+ head = &hash->table[i];
|
|
+
|
|
+ rcu_read_lock();
|
|
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
|
|
+ /* only own claims are interesting */
|
|
+ if (claim->backbone_gw != backbone_gw)
|
|
+ continue;
|
|
+
|
|
+ bla_send_claim(bat_priv, claim->addr, claim->vid,
|
|
+ CLAIM_TYPE_ADD);
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+ }
|
|
+
|
|
+ /* finally, send an announcement frame */
|
|
+ bla_send_announce(bat_priv, backbone_gw);
|
|
+ backbone_gw_free_ref(backbone_gw);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @backbone_gw: the backbone gateway from whom we are out of sync
|
|
+ *
|
|
+ * When the crc is wrong, ask the backbone gateway for a full table update.
|
|
+ * After the request, it will repeat all of his own claims and finally
|
|
+ * send an announcement claim with which we can check again.
|
|
+ */
|
|
+
|
|
+static void bla_send_request(struct backbone_gw *backbone_gw)
|
|
+{
|
|
+ /* first, remove all old entries */
|
|
+ bla_del_backbone_claims(backbone_gw);
|
|
+
|
|
+ bat_dbg(DBG_BLA, backbone_gw->bat_priv,
|
|
+ "Sending REQUEST to %pM\n",
|
|
+ backbone_gw->orig);
|
|
+
|
|
+ /* send request */
|
|
+ bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
|
|
+ backbone_gw->vid, CLAIM_TYPE_REQUEST);
|
|
+
|
|
+ /* no local broadcasts should be sent or received, for now. */
|
|
+ if (!atomic_read(&backbone_gw->request_sent)) {
|
|
+ atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
|
|
+ atomic_set(&backbone_gw->request_sent, 1);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * @bat_priv: the bat priv with all the soft interface information
|
|
+ * @backbone_gw: our backbone gateway which should be announced
|
|
+ *
|
|
+ * This function sends an announcement. It is called from multiple
|
|
+ * places.
|
|
+ */
|
|
+static void bla_send_announce(struct bat_priv *bat_priv,
|
|
+ struct backbone_gw *backbone_gw)
|
|
+{
|
|
+ uint8_t mac[ETH_ALEN];
|
|
+ uint16_t crc;
|
|
+
|
|
+ memcpy(mac, announce_mac, 4);
|
|
+ crc = htons(backbone_gw->crc);
|
|
+ memcpy(&mac[4], (uint8_t *) &crc, 2);
|
|
+
|
|
+ bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
|
|
+
|
|
+}
|
|
+
+/*
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address of the claim
+ * @vid: the VLAN ID of the frame
+ * @backbone_gw: the backbone gateway which claims it
+ *
+ * Adds a claim in the claim hash.
+ */
+
+static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+ const short vid, struct backbone_gw *backbone_gw)
+{
+ struct claim *claim;
+ struct claim search_claim;
+ int hash_added;
+
+ memcpy(search_claim.addr, mac, ETH_ALEN);
+ search_claim.vid = vid;
+ claim = claim_hash_find(bat_priv, &search_claim);
+
+ /* create a new claim entry if it does not exist yet. */
+ if (!claim) {
+ claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
+ if (!claim)
+ return;
+
+ memcpy(claim->addr, mac, ETH_ALEN);
+ claim->vid = vid;
+ claim->lasttime = jiffies;
+ claim->backbone_gw = backbone_gw;
+
+ atomic_set(&claim->refcount, 2);
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+ mac, vid);
+ hash_added = hash_add(bat_priv->claim_hash, compare_claim,
+ choose_claim, claim, &claim->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* only local changes happened. */
+ kfree(claim);
+ return;
+ }
+ } else {
+ claim->lasttime = jiffies;
+ if (claim->backbone_gw == backbone_gw)
+ /* no need to register a new backbone */
+ goto claim_free_ref;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_add_claim(): changing ownership for %pM, vid %d\n",
+ mac, vid);
+
+ claim->backbone_gw->crc ^=
+ crc16(0, claim->addr, ETH_ALEN);
+ backbone_gw_free_ref(claim->backbone_gw);
+
+ }
+ /* set (new) backbone gw */
+ atomic_inc(&backbone_gw->refcount);
+ claim->backbone_gw = backbone_gw;
+
+ backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ backbone_gw->lasttime = jiffies;
+
+claim_free_ref:
+ claim_free_ref(claim);
+}
+
+/*
+ * Delete a claim from the claim hash which has the
+ * given mac address and vid.
+ */
+static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+ const short vid)
+{
+ struct claim search_claim, *claim;
+
+ memcpy(search_claim.addr, mac, ETH_ALEN);
+ search_claim.vid = vid;
+ claim = claim_hash_find(bat_priv, &search_claim);
+ if (!claim)
+ return;
+
+ bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+
+ hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
+ claim_free_ref(claim); /* reference from the hash is gone */
+
+ claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+
+ /* don't need the reference from hash_find() anymore */
+ claim_free_ref(claim);
+}
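Worth spelling out: the per-gateway checksum touched in bla_add_claim() and bla_del_claim() above is a plain XOR accumulator over crc16() of the claimed MAC addresses, so adding and removing a claim are the same operation and the result does not depend on insertion order. A minimal model (crc16() as declared in the kernel's <linux/crc16.h>):

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

uint16_t crc16(uint16_t crc, const uint8_t *buffer, size_t len); /* <linux/crc16.h> */

/* adding a claim: XOR its crc16 into the table checksum */
static uint16_t claim_crc_add(uint16_t table_crc, const uint8_t addr[ETH_ALEN])
{
	return table_crc ^ crc16(0, addr, ETH_ALEN);
}

/* removing it: x ^ y ^ y == x, so the very same XOR undoes the add */
static uint16_t claim_crc_del(uint16_t table_crc, const uint8_t addr[ETH_ALEN])
{
	return table_crc ^ crc16(0, addr, ETH_ALEN);
}

Two gateways holding the same claim set therefore compute the same checksum no matter in which order the claims arrived, which is what makes the ANNOUNCE comparison below meaningful.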
+
+/* check for ANNOUNCE frame, return 1 if handled */
+static int handle_announce(struct bat_priv *bat_priv,
+ uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+{
+ struct backbone_gw *backbone_gw;
+ uint16_t crc;
+
+ if (memcmp(an_addr, announce_mac, 4) != 0)
+ return 0;
+
+ backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+ if (unlikely(!backbone_gw))
+ return 1;
+
+ /* handle as ANNOUNCE frame */
+ backbone_gw->lasttime = jiffies;
+ crc = ntohs(*((uint16_t *) (&an_addr[4])));
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "handle_announce(): ANNOUNCE vid %d (sent "
+ "by %pM)... CRC = %04x\n",
+ vid, backbone_gw->orig, crc);
+
+ if (backbone_gw->crc != crc) {
+ bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+ "handle_announce(): CRC FAILED for %pM/%d"
+ "(my = %04x, sent = %04x)\n",
+ backbone_gw->orig, backbone_gw->vid,
+ backbone_gw->crc, crc);
+
+ bla_send_request(backbone_gw);
+ } else {
+ /* if we have sent a request and the crc was OK,
+ * we can allow traffic again. */
+ if (atomic_read(&backbone_gw->request_sent)) {
+ atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+ atomic_set(&backbone_gw->request_sent, 0);
+ }
+ }
+
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/* check for REQUEST frame, return 1 if handled */
+static int handle_request(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ struct ethhdr *ethhdr, short vid)
+{
+ /* check for REQUEST frame */
+ if (!compare_eth(backbone_addr, ethhdr->h_dest))
+ return 0;
+
+ /* sanity check, this should not happen on a normal switch,
+ * we ignore it in this case. */
+ if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+ return 1;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "handle_request(): REQUEST vid %d (sent "
+ "by %pM)...\n",
+ vid, ethhdr->h_source);
+
+ bla_answer_request(bat_priv, primary_if, vid);
+ return 1;
+}
+
+/* check for UNCLAIM frame, return 1 if handled */
+static int handle_unclaim(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ uint8_t *claim_addr, short vid)
+{
+ struct backbone_gw *backbone_gw;
+
+ /* unclaim in any case if it is our own */
+ if (primary_if && compare_eth(backbone_addr,
+ primary_if->net_dev->dev_addr))
+ bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+
+ backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+
+ if (!backbone_gw)
+ return 1;
+
+ /* this must be an UNCLAIM frame */
+ bat_dbg(DBG_BLA, bat_priv, "handle_unclaim():"
+ "UNCLAIM %pM on vid %d (sent by %pM)...\n",
+ claim_addr, vid, backbone_gw->orig);
+
+ bla_del_claim(bat_priv, claim_addr, vid);
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/* check for CLAIM frame, return 1 if handled */
+static int handle_claim(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if, uint8_t *backbone_addr,
+ uint8_t *claim_addr, short vid)
+{
+ struct backbone_gw *backbone_gw;
+
+ /* register the gateway if not yet available, and add the claim. */
+
+ backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+ if (unlikely(!backbone_gw))
+ return 1;
+
+ /* this must be a CLAIM frame */
+ bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+ if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+
+ /* TODO: we could call something like tt_local_del() here. */
+
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/**
+ *
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hw_src: the Hardware source in the ARP Header
+ * @hw_dst: the Hardware destination in the ARP Header
+ * @ethhdr: pointer to the Ethernet header of the claim frame
+ *
+ * checks if it is a claim packet and if it is on the same group.
+ * This function also applies the group ID of the sender
+ * if it is in the same mesh.
+ *
+ * returns:
+ * 2 - if it is a claim packet and on the same group
+ * 1 - if it is a claim packet from another group
+ * 0 - if it is not a claim packet
+ */
+static int check_claim_group(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ uint8_t *hw_src, uint8_t *hw_dst,
+ struct ethhdr *ethhdr)
+{
+ uint8_t *backbone_addr;
+ struct orig_node *orig_node;
+ struct bla_claim_dst *bla_dst, *bla_dst_own;
+
+ bla_dst = (struct bla_claim_dst *) hw_dst;
+ bla_dst_own = &bat_priv->claim_dest;
+
+ /* check if it is a claim packet in general */
+ if (memcmp(bla_dst->magic, bla_dst_own->magic,
+ sizeof(bla_dst->magic)) != 0)
+ return 0;
+
+ /* if announcement packet, use the source,
+ * otherwise assume it is in the hw_src */
+ switch (bla_dst->type) {
+ case CLAIM_TYPE_ADD:
+ backbone_addr = hw_src;
+ break;
+ case CLAIM_TYPE_REQUEST:
+ case CLAIM_TYPE_ANNOUNCE:
+ case CLAIM_TYPE_DEL:
+ backbone_addr = ethhdr->h_source;
+ break;
+ default:
+ return 0;
+ }
+
+ /* don't accept claim frames from ourselves */
+ if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ return 0;
+
+ /* if it is already the same group, it is fine. */
+ if (bla_dst->group == bla_dst_own->group)
+ return 2;
+
+ /* let's see if this originator is in our mesh */
+ orig_node = orig_hash_find(bat_priv, backbone_addr);
+
+ /* don't accept claims from gateways which are not in
+ * the same mesh or group. */
+ if (!orig_node)
+ return 1;
+
+ /* if our mesh friend's MAC is bigger, use it for ourselves. */
+ if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
+ bat_dbg(DBG_BLA, bat_priv,
+ "taking other backbones claim group: %04x\n",
+ ntohs(bla_dst->group));
+ bla_dst_own->group = bla_dst->group;
+ }
+
+ orig_node_free_ref(orig_node);
+
+ return 2;
+}
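The merge above only ever adopts the numerically larger group ID, so all backbone gateways of one mesh converge on a single value. A sketch of the two rules involved; the crc16() seeding matches what bla_init() and bla_update_orig_address() do further down:

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h> /* htons()/ntohs() for the userspace sketch */

#define ETH_ALEN 6

uint16_t crc16(uint16_t crc, const uint8_t *buffer, size_t len); /* <linux/crc16.h> */

/* initial group id: crc16 of the primary interface MAC, network order */
static uint16_t initial_group_id(const uint8_t mac[ETH_ALEN])
{
	return htons(crc16(0, mac, ETH_ALEN));
}

/* merge rule from check_claim_group(): the larger id wins on both sides */
static uint16_t merge_group_id(uint16_t own, uint16_t other)
{
	return (ntohs(other) > ntohs(own)) ? other : own;
}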
+
+
+/*
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ *
+ * Check if this is a claim frame, and process it accordingly.
+ *
+ * returns 1 if it was a claim frame, otherwise return 0 to
+ * tell the caller that it can use the frame on its own.
+ */
+
+static int bla_process_claim(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct sk_buff *skb)
+{
+ struct ethhdr *ethhdr;
+ struct vlan_ethhdr *vhdr;
+ struct arphdr *arphdr;
+ uint8_t *hw_src, *hw_dst;
+ struct bla_claim_dst *bla_dst;
+ uint16_t proto;
+ int headlen;
+ short vid = -1;
+ int ret;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+ vhdr = (struct vlan_ethhdr *) ethhdr;
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ proto = ntohs(vhdr->h_vlan_encapsulated_proto);
+ headlen = sizeof(*vhdr);
+ } else {
+ proto = ntohs(ethhdr->h_proto);
+ headlen = sizeof(*ethhdr);
+ }
+
+ if (proto != ETH_P_ARP)
+ return 0; /* not a claim frame */
+
+ /* this must be an ARP frame. check if it is a claim. */
+
+ if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
+ return 0;
+
+ /* pskb_may_pull() may have modified the pointers, get ethhdr again */
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
+ arphdr = (struct arphdr *) ((uint8_t *) ethhdr + headlen);
+
+ /* Check whether the ARP frame carries valid
+ * IP information */
+
+ if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+ return 0;
+ if (arphdr->ar_pro != htons(ETH_P_IP))
+ return 0;
+ if (arphdr->ar_hln != ETH_ALEN)
+ return 0;
+ if (arphdr->ar_pln != 4)
+ return 0;
+
+ hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+ hw_dst = hw_src + ETH_ALEN + 4;
+ bla_dst = (struct bla_claim_dst *) hw_dst;
+
+ /* check if it is a claim frame. */
+ ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+ if (ret == 1)
+ bat_dbg(DBG_BLA, bat_priv, "bla_process_claim(): received "
+ "a claim frame from another group. From: "
+ "%pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
+
+ if (ret < 2)
+ return ret;
+
+ /* become a backbone gw ourselves on this vlan if not happened yet */
+ bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+
+ /* check for the different types of claim frames ... */
+ switch (bla_dst->type) {
+ case CLAIM_TYPE_ADD:
+ if (handle_claim(bat_priv, primary_if, hw_src,
+ ethhdr->h_source, vid))
+ return 1;
+ break;
+ case CLAIM_TYPE_DEL:
+ if (handle_unclaim(bat_priv, primary_if,
+ ethhdr->h_source, hw_src, vid))
+ return 1;
+ break;
+
+ case CLAIM_TYPE_ANNOUNCE:
+ if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+ return 1;
+ break;
+ case CLAIM_TYPE_REQUEST:
+ if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+ return 1;
+ break;
+ }
+
+ bat_dbg(DBG_BLA, bat_priv, "bla_process_claim(): ERROR - this looks"
+ "like a claim frame, but is useless. eth src"
+ "%pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
+ return 1;
+}
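The pointer arithmetic in bla_process_claim() relies on the fixed body layout of an Ethernet/IPv4 ARP packet, which the four arphdr sanity checks above guarantee. For orientation, the body that follows struct arphdr looks like this; BLA squeezes its payload into the two hardware-address fields:

#include <stdint.h>

#define ETH_ALEN 6

/* layout sketch only; the patch accesses these fields via raw offsets */
struct arp_eth_body {
	uint8_t ar_sha[ETH_ALEN]; /* hw_src: backbone gw resp. claimed client */
	uint8_t ar_sip[4]; /* sender IP, unused by BLA */
	uint8_t ar_tha[ETH_ALEN]; /* hw_dst: cast to struct bla_claim_dst */
	uint8_t ar_tip[4]; /* target IP, unused by BLA */
};
/* hence: hw_src = (uint8_t *)arphdr + sizeof(struct arphdr)
 * and hw_dst = hw_src + ETH_ALEN + 4, exactly as computed above */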
+
+/*
+ * Check when we last heard from other nodes, and remove them in case of
+ * a time out, or clean all backbone gws if now is set.
+ */
+static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+{
+ struct backbone_gw *backbone_gw;
+ struct hlist_node *node, *node_tmp;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ int i;
+
+ hash = bat_priv->backbone_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+ head, hash_entry) {
+ if (now)
+ goto purge_now;
+ if (!has_timed_out(backbone_gw->lasttime,
+ BLA_BACKBONE_TIMEOUT))
+ continue;
+
+ bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+ "bla_purge_backbone_gw(): backbone gw %pM"
+ " timed out\n", backbone_gw->orig);
+
+purge_now:
+ /* don't wait for the pending request anymore */
+ if (atomic_read(&backbone_gw->request_sent))
+ atomic_dec(&bat_priv->bla_num_requests);
+
+ bla_del_backbone_claims(backbone_gw);
+
+ hlist_del_rcu(node);
+ backbone_gw_free_ref(backbone_gw);
+ }
+ spin_unlock_bh(list_lock);
+ }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface, may be NULL if now is set
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from our own claims, and remove them in case of
+ * a time out, or clean all claims if now is set
+ */
+static void bla_purge_claims(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if, int now)
+{
+ struct claim *claim;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ int i;
+
+ hash = bat_priv->claim_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ if (now)
+ goto purge_now;
+ if (!compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ continue;
+ if (!has_timed_out(claim->lasttime,
+ BLA_CLAIM_TIMEOUT))
+ continue;
+
+ bat_dbg(DBG_BLA, bat_priv,
+ "bla_purge_claims(): %pM, vid %d, time out\n",
+ claim->addr, claim->vid);
+
+purge_now:
+ handle_unclaim(bat_priv, primary_if,
+ claim->backbone_gw->orig,
+ claim->addr, claim->vid);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the new selected primary_if
+ * @oldif: the old primary interface, may be NULL
+ *
+ * Update the backbone gateways when the own orig address changes.
+ *
+ */
+void bla_update_orig_address(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct hard_iface *oldif)
+{
+ struct backbone_gw *backbone_gw;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct hashtable_t *hash;
+ int i;
+
+ /* reset bridge loop avoidance group id */
+ bat_priv->claim_dest.group =
+ htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+
+ if (!oldif) {
+ bla_purge_claims(bat_priv, NULL, 1);
+ bla_purge_backbone_gw(bat_priv, 1);
+ return;
+ }
+
+ hash = bat_priv->backbone_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ /* own orig still holds the old value. */
+ if (!compare_eth(backbone_gw->orig,
+ oldif->net_dev->dev_addr))
+ continue;
+
+ memcpy(backbone_gw->orig,
+ primary_if->net_dev->dev_addr, ETH_ALEN);
+ /* send an announce frame so others will ask for our
+ * claims and update their tables. */
+ bla_send_announce(bat_priv, backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/* (re)start the timer */
+static void bla_start_timer(struct bat_priv *bat_priv)
+{
+ INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
+ msecs_to_jiffies(BLA_PERIOD_LENGTH));
+}
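bla_start_timer() uses the usual self-rearming delayed-work idiom: bla_periodic_work() below does its bookkeeping and re-queues itself as its last action. The generic shape of that pattern, for readers unfamiliar with it (the patch queues on batman-adv's private bat_event_workqueue rather than the system workqueue used in this sketch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_periodic(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_work, my_periodic);

static void my_periodic(struct work_struct *work)
{
	/* ... periodic bookkeeping, e.g. purging and announcing ... */

	/* rearm: run again after the period elapses */
	schedule_delayed_work(&my_work, msecs_to_jiffies(10000));
}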
+
+/*
+ * periodic work to do:
+ * * purge structures when they are too old
+ * * send announcements
+ */
+
+static void bla_periodic_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, bla_work);
+ struct hlist_node *node;
+ struct hlist_head *head;
+ struct backbone_gw *backbone_gw;
+ struct hashtable_t *hash;
+ struct hard_iface *primary_if;
+ int i;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ bla_purge_claims(bat_priv, primary_if, 0);
+ bla_purge_backbone_gw(bat_priv, 0);
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto out;
+
+ hash = bat_priv->backbone_hash;
+ if (!hash)
+ goto out;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ if (!compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ continue;
+
+ backbone_gw->lasttime = jiffies;
+
+ bla_send_announce(bat_priv, backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+
+ bla_start_timer(bat_priv);
+}
+
+/* initialize all bla structures */
+int bla_init(struct bat_priv *bat_priv)
+{
+ int i;
+ uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+ struct hard_iface *primary_if;
+
+ bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+
+ /* setting claim destination address */
+ memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
+ bat_priv->claim_dest.type = 0;
+ primary_if = primary_if_get_selected(bat_priv);
+ if (primary_if) {
+ bat_priv->claim_dest.group =
+ htons(crc16(0, primary_if->net_dev->dev_addr,
+ ETH_ALEN));
+ hardif_free_ref(primary_if);
+ } else
+ bat_priv->claim_dest.group = 0; /* will be set later */
+
+ /* initialize the duplicate list */
+ for (i = 0; i < DUPLIST_SIZE; i++)
+ bat_priv->bcast_duplist[i].entrytime =
+ jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+ bat_priv->bcast_duplist_curr = 0;
+
+ if (bat_priv->claim_hash)
+ return 1;
+
+ bat_priv->claim_hash = hash_new(128);
+ bat_priv->backbone_hash = hash_new(32);
+
+ if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+ return -1;
+
+ bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+ bla_start_timer(bat_priv);
+ return 1;
+}
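bla_init() above fills bat_priv->claim_dest, the 6-byte pseudo destination that claim ARP frames carry in their target-hardware-address field. The struct itself is declared in packet.h elsewhere in this patch (not shown in this excerpt); a sketch consistent with the usage here and in check_claim_group():

#include <stdint.h>

/* layout sketch; the authoritative declaration lives in packet.h */
struct bla_claim_dst {
	uint8_t magic[3]; /* 0xff, 0x43, 0x05 per the claim_dest array above */
	uint8_t type; /* CLAIM_TYPE_* of this frame */
	uint16_t group; /* claim group id: htons(crc16(primary MAC)) */
} __attribute__((packed));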
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bcast_packet: the broadcast packet to check
+ * @hdr_size: maximum length of the frame
+ *
+ * check if it is on our broadcast list. Another gateway might
+ * have sent the same packet because it is connected to the same backbone,
+ * so we have to remove this duplicate.
+ *
+ * This is performed by checking the CRC, which will tell us
+ * with a good chance that it is the same packet. If it is furthermore
+ * sent by another host, drop it. We allow equal packets from
+ * the same host however as this might be intended.
+ *
+ **/
+
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+ struct bcast_packet *bcast_packet,
+ int hdr_size)
+{
+ int i, length, curr;
+ uint8_t *content;
+ uint16_t crc;
+ struct bcast_duplist_entry *entry;
+
+ length = hdr_size - sizeof(*bcast_packet);
+ content = (uint8_t *) bcast_packet;
+ content += sizeof(*bcast_packet);
+
+ /* calculate the crc ... */
+ crc = crc16(0, content, length);
+
+ for (i = 0 ; i < DUPLIST_SIZE; i++) {
+ curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+ entry = &bat_priv->bcast_duplist[curr];
+
+ /* we can stop searching if the entry is too old;
+ * later entries will be even older */
+ if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+ break;
+
+ if (entry->crc != crc)
+ continue;
+
+ if (compare_eth(entry->orig, bcast_packet->orig))
+ continue;
+
+ /* this entry seems to match: same crc, not too old,
+ * and from another gw. therefore return 1 to forbid it. */
+ return 1;
+ }
+ /* not found, add a new entry (overwrite the oldest entry) */
+ curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+ entry = &bat_priv->bcast_duplist[curr];
+ entry->crc = crc;
+ entry->entrytime = jiffies;
+ memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+ bat_priv->bcast_duplist_curr = curr;
+
+ /* allow it, it's the first occurrence. */
+ return 0;
+}
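The broadcast duplicate list above is a small ring ordered newest-first: lookups start at bcast_duplist_curr and walk towards older entries, and an insert claims the slot just behind the head (the oldest one), which then becomes the new head. The index arithmetic in isolation; the DUPLIST_SIZE value is an assumption, the real constant is defined elsewhere in this patch:

#define DUPLIST_SIZE 16 /* assumed for the sketch */

/* i = 0 is the newest entry, i = DUPLIST_SIZE - 1 the oldest */
static int duplist_scan_slot(int curr, int i)
{
	return (curr + i) % DUPLIST_SIZE;
}

/* overwrite the oldest slot and make it the new head */
static int duplist_insert_slot(int curr)
{
	return (curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
}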
+
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ *
+ * check if the originator is a gateway for any VLAN ID.
+ *
+ * returns 1 if it is found, 0 otherwise
+ *
+ **/
+
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+{
+ struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct backbone_gw *backbone_gw;
+ int i;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ return 0;
+
+ if (!hash)
+ return 0;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+ if (compare_eth(backbone_gw->orig, orig)) {
+ rcu_read_unlock();
+ return 1;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ return 0;
+}
+
+
+/**
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: maximum length of the frame
+ *
+ * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
+ * if the orig_node is also a gateway on the soft interface, otherwise it
+ * returns 0.
+ *
+ **/
+
+int bla_is_backbone_gw(struct sk_buff *skb,
+ struct orig_node *orig_node, int hdr_size)
+{
+ struct ethhdr *ethhdr;
+ struct vlan_ethhdr *vhdr;
+ struct backbone_gw *backbone_gw;
+ short vid = -1;
+
+ if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+ return 0;
+
+ /* first, find out the vid. */
+ if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
+ return 0;
+
+ ethhdr = (struct ethhdr *) (((uint8_t *)skb->data) + hdr_size);
+
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+ if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+ return 0;
+
+ vhdr = (struct vlan_ethhdr *) (((uint8_t *)skb->data) +
+ hdr_size);
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ }
+
+ /* see if this originator is a backbone gw for this VLAN */
+
+ backbone_gw = backbone_hash_find(orig_node->bat_priv,
+ orig_node->orig, vid);
+ if (!backbone_gw)
+ return 0;
+
+ backbone_gw_free_ref(backbone_gw);
+ return 1;
+}
+
+/* free all bla structures (for softinterface free or module unload) */
+void bla_free(struct bat_priv *bat_priv)
+{
+ struct hard_iface *primary_if;
+
+ cancel_delayed_work_sync(&bat_priv->bla_work);
+ primary_if = primary_if_get_selected(bat_priv);
+
+ if (bat_priv->claim_hash) {
+ bla_purge_claims(bat_priv, primary_if, 1);
+ hash_destroy(bat_priv->claim_hash);
+ bat_priv->claim_hash = NULL;
+ }
+ if (bat_priv->backbone_hash) {
+ bla_purge_backbone_gw(bat_priv, 1);
+ hash_destroy(bat_priv->backbone_hash);
+ bat_priv->backbone_hash = NULL;
+ }
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_rx checks if:
+ * * we have to race for a claim
+ * * the frame is allowed on the LAN
+ *
+ * in these cases, the skb is further handled by this function and
+ * returns 1, otherwise it returns 0 and the caller shall further
+ * process the skb.
+ *
+ **/
+
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+ struct ethhdr *ethhdr;
+ struct claim search_claim, *claim = NULL;
+ struct hard_iface *primary_if;
+ int ret;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto handled;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto allow;
+
+ if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+ /* don't allow broadcasts while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto handled;
+
+ memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+ search_claim.vid = vid;
+ claim = claim_hash_find(bat_priv, &search_claim);
+
+ if (!claim) {
+ /* possible optimization: race for a claim */
+ /* No claim exists yet, claim it for us! */
+ handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+
+ /* if it is our own claim ... */
+ if (compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
+ /* ... allow it in any case */
+ claim->lasttime = jiffies;
+ goto allow;
+ }
+
+ /* if it is a broadcast ... */
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* ... drop it. the responsible gateway is in charge. */
+ goto handled;
+ } else {
+ /* seems the client considers us as its best gateway.
+ * send a claim and update the claim table
+ * immediately. */
+ handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+allow:
+ bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ ret = 0;
+ goto out;
+
+handled:
+ kfree_skb(skb);
+ ret = 1;
+
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ if (claim)
+ claim_free_ref(claim);
+ return ret;
+}
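bla_rx() above and bla_tx() below are the two hooks through which the soft interface consults BLA; elsewhere in this patch (not shown in this excerpt) soft-interface.c presumably calls them on the receive and transmit paths and stops processing when they return 1. A hedged sketch of such a call site; everything around bla_rx() itself is an assumption for illustration:

/* sketch of a caller; note bla_rx() already frees the skb in the
 * "handled" case, so the caller must not touch it again */
static void example_rx_path(struct bat_priv *bat_priv,
			    struct sk_buff *skb, short vid)
{
	if (bla_rx(bat_priv, skb, vid))
		return; /* consumed (and freed) by BLA */

	/* ... hand the frame to the local bridge/LAN as usual ... */
}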
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_tx checks if:
+ * * a claim was received which has to be processed
+ * * the frame is allowed on the mesh
+ *
+ * in these cases, the skb is further handled by this function and
+ * returns 1, otherwise it returns 0 and the caller shall further
+ * process the skb.
+ *
+ **/
+
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+ struct ethhdr *ethhdr;
+ struct claim search_claim, *claim = NULL;
+ struct hard_iface *primary_if;
+ int ret = 0;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto allow;
+
+ /* in VLAN case, the mac header might not be set. */
+ skb_reset_mac_header(skb);
+
+ if (bla_process_claim(bat_priv, primary_if, skb))
+ goto handled;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+ /* don't allow broadcasts while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto handled;
+
+ memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+ search_claim.vid = vid;
+
+ claim = claim_hash_find(bat_priv, &search_claim);
+
+ /* if no claim exists, allow it. */
+ if (!claim)
+ goto allow;
+
+ /* check if we are responsible. */
+ if (compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
+ /* if yes, the client has roamed and we have
+ * to unclaim it. */
+ handle_unclaim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+
+ /* check if it is a multicast/broadcast frame */
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* drop it. the responsible gateway has forwarded it into
+ * the backbone network. */
+ goto handled;
+ } else {
+ /* we must allow it. at least if we are
+ * responsible for the DESTINATION. */
+ goto allow;
+ }
+allow:
+ bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ ret = 0;
+ goto out;
+handled:
+ ret = 1;
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ if (claim)
+ claim_free_ref(claim);
+ return ret;
+}
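Taken together, bla_rx() and bla_tx() implement one simple policy per (client, vid) claim. A compact reference of the cases handled above, written out as a comment block rather than new logic:

/* Decision summary (sketch, derived from the two functions above):
 *
 *   direction  claim owner   unicast          broadcast/multicast
 *   ---------  -----------   --------------   -------------------
 *   bla_rx     nobody        claim + allow    claim + allow
 *   bla_rx     us            allow            allow
 *   bla_rx     other gw      claim + allow    drop (other gw serves it)
 *   bla_tx     nobody        allow            allow
 *   bla_tx     us            unclaim + allow  unclaim + allow (roamed)
 *   bla_tx     other gw      allow            drop (came via backbone)
 *
 * While a REQUEST is in flight (bla_num_requests > 0), broadcasts are
 * dropped in both directions until the tables are in sync again. */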
+
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct net_device *net_dev = (struct net_device *)seq->private;
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ struct hashtable_t *hash = bat_priv->claim_hash;
+ struct claim *claim;
+ struct hard_iface *primary_if;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ uint32_t i;
+ bool is_own;
+ int ret = 0;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if) {
+ ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
+ "specify interfaces to enable it\n",
+ net_dev->name);
+ goto out;
+ }
+
+ if (primary_if->if_status != IF_ACTIVE) {
+ ret = seq_printf(seq, "BATMAN mesh %s disabled - "
+ "primary interface not active\n",
+ net_dev->name);
+ goto out;
+ }
+
+ seq_printf(seq, "Claims announced for the mesh %s "
+ "(orig %pM, group id %04x)\n",
+ net_dev->name, primary_if->net_dev->dev_addr,
+ ntohs(bat_priv->claim_dest.group));
+ seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
+ "Client", "VID", "Originator", "CRC");
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+ is_own = compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
+ claim->addr, claim->vid,
+ claim->backbone_gw->orig,
+ (is_own ? 'x' : ' '),
+ claim->backbone_gw->crc);
+ }
+ rcu_read_unlock();
+ }
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ return ret;
+}
diff --git a/bridge_loop_avoidance.h b/bridge_loop_avoidance.h
new file mode 100644
index 0000000..24d7f16
--- /dev/null
+++ b/bridge_loop_avoidance.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_BLA_H_
+#define _NET_BATMAN_ADV_BLA_H_
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_is_backbone_gw(struct sk_buff *skb,
+ struct orig_node *orig_node, int hdr_size);
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+ struct bcast_packet *bcast_packet, int hdr_size);
+void bla_update_orig_address(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct hard_iface *oldif);
+int bla_init(struct bat_priv *bat_priv);
+void bla_free(struct bat_priv *bat_priv);
+
+#define BLA_CRC_INIT 0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#define bla_rx(...) (0)
+#define bla_tx(...) (0)
+#define bla_is_backbone_gw(...) (0)
+#define bla_claim_table_seq_print_text (0)
+#define bla_is_backbone_gw_orig(...) (0)
+#define bla_check_bcast_duplist(...) (0)
+#define bla_update_orig_address(...) {}
+#define bla_init(...) (1)
+#define bla_free(...) {}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
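The #else branch above replaces every BLA entry point with a constant macro, so call sites compile unchanged when CONFIG_BATMAN_ADV_BLA is off and the compiler discards the dead branches. (One asymmetry: bla_claim_table_seq_print_text is stubbed without an argument list, since it is only ever referenced as a function pointer, e.g. in a debugfs table, never called directly.) A sketch of a caller that works in both configurations; the surrounding function is hypothetical:

static int example_handle(struct sk_buff *skb, struct orig_node *orig_node,
			  int hdr_size)
{
	/* with CONFIG_BATMAN_ADV_BLA=n this becomes if ((0)) { ... }
	 * and the branch vanishes at compile time */
	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
		return 0; /* sketch: ignore frames from other backbone gws */

	return 1;
}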
diff --git a/compat.c b/compat.c
|
|
index 1793904..304ed6a 100644
|
|
--- a/compat.c
|
|
+++ b/compat.c
|
|
@@ -20,20 +20,23 @@ void free_rcu_neigh_node(struct rcu_head *rcu)
|
|
kfree(neigh_node);
|
|
}
|
|
|
|
-void free_rcu_softif_neigh(struct rcu_head *rcu)
|
|
+void free_rcu_tt_local_entry(struct rcu_head *rcu)
|
|
{
|
|
- struct softif_neigh *softif_neigh;
|
|
+ struct tt_common_entry *tt_common_entry;
|
|
+ struct tt_local_entry *tt_local_entry;
|
|
|
|
- softif_neigh = container_of(rcu, struct softif_neigh, rcu);
|
|
- kfree(softif_neigh);
|
|
+ tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
|
|
+ tt_local_entry = container_of(tt_common_entry, struct tt_local_entry,
|
|
+ common);
|
|
+ kfree(tt_local_entry);
|
|
}
|
|
|
|
-void free_rcu_tt_local_entry(struct rcu_head *rcu)
|
|
+void free_rcu_backbone_gw(struct rcu_head *rcu)
|
|
{
|
|
- struct tt_local_entry *tt_local_entry;
|
|
+ struct backbone_gw *backbone_gw;
|
|
|
|
- tt_local_entry = container_of(rcu, struct tt_local_entry, rcu);
|
|
- kfree(tt_local_entry);
|
|
+ backbone_gw = container_of(rcu, struct backbone_gw, rcu);
|
|
+ kfree(backbone_gw);
|
|
}
|
|
|
|
#endif /* < KERNEL_VERSION(3, 0, 0) */
|
|
diff --git a/compat.h b/compat.h
|
|
index 58c3c6a..5cc9e32 100644
|
|
--- a/compat.h
|
|
+++ b/compat.h
|
|
@@ -27,6 +27,13 @@
|
|
|
|
#include <linux/version.h> /* LINUX_VERSION_CODE */
|
|
|
|
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
|
|
+#include <linux/autoconf.h>
|
|
+#else
|
|
+#include <generated/autoconf.h>
|
|
+#endif
|
|
+#include "compat-autoconf.h"
|
|
+
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
|
|
|
|
#define __always_unused __attribute__((unused))
|
|
@@ -35,6 +42,7 @@
|
|
|
|
#endif /* < KERNEL_VERSION(2, 6, 33) */
|
|
|
|
+
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
|
|
|
|
#define hlist_first_rcu(head) (*((struct hlist_node **)(&(head)->first)))
|
|
@@ -49,20 +57,73 @@
|
|
|
|
#endif /* < KERNEL_VERSION(2, 6, 34) */
|
|
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
|
|
+
|
|
+#define __compat__module_param_call(p1, p2, p3, p4, p5, p6, p7) \
|
|
+ __module_param_call(p1, p2, p3, p4, p5, p7)
|
|
+
|
|
+#else
|
|
+
|
|
+#define __compat__module_param_call(p1, p2, p3, p4, p5, p6, p7) \
|
|
+ __module_param_call(p1, p2, p3, p4, p5, p6, p7)
|
|
+
|
|
+#endif /* < KERNEL_VERSION(2, 6, 31) */
|
|
+
|
|
+
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
|
|
|
|
#define __rcu
|
|
+#define IFF_BRIDGE_PORT 0 || (hard_iface->net_dev->br_port ? 1 : 0)
|
|
+
|
|
+struct kernel_param_ops {
|
|
+ /* Returns 0, or -errno. arg is in kp->arg. */
|
|
+ int (*set)(const char *val, const struct kernel_param *kp);
|
|
+ /* Returns length written or -errno. Buffer is 4k (ie. be short!) */
|
|
+ int (*get)(char *buffer, struct kernel_param *kp);
|
|
+ /* Optional function to free kp->arg when module unloaded. */
|
|
+ void (*free)(void *arg);
|
|
+};
|
|
+
|
|
+#define module_param_cb(name, ops, arg, perm) \
|
|
+ static int __compat_set_param_##name(const char *val, \
|
|
+ struct kernel_param *kp) \
|
|
+ { return (ops)->set(val, kp); } \
|
|
+ static int __compat_get_param_##name(char *buffer, \
|
|
+ struct kernel_param *kp) \
|
|
+ { return (ops)->get(buffer, kp); } \
|
|
+ __compat__module_param_call(MODULE_PARAM_PREFIX, name, \
|
|
+ __compat_set_param_##name, \
|
|
+ __compat_get_param_##name, arg, \
|
|
+ __same_type((arg), bool *), perm)
|
|
+
|
|
+static inline int __param_set_copystring(const char *val,
|
|
+ const struct kernel_param *kp)
|
|
+{
|
|
+ return param_set_copystring(val, (struct kernel_param *)kp);
|
|
+}
|
|
+#define param_set_copystring __param_set_copystring
|
|
|
|
#endif /* < KERNEL_VERSION(2, 6, 36) */
|
|
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
|
|
+
|
|
+#define kstrtoul strict_strtoul
|
|
+#define kstrtol strict_strtol
|
|
+
|
|
+#endif /* < KERNEL_VERSION(2, 6, 39) */
|
|
+
|
|
+
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
|
|
|
|
#define kfree_rcu(ptr, rcu_head) call_rcu(&ptr->rcu_head, free_rcu_##ptr)
|
|
+#define vlan_insert_tag(skb, vid) __vlan_put_tag(skb, vid)
|
|
|
|
void free_rcu_gw_node(struct rcu_head *rcu);
|
|
void free_rcu_neigh_node(struct rcu_head *rcu);
|
|
-void free_rcu_softif_neigh(struct rcu_head *rcu);
|
|
void free_rcu_tt_local_entry(struct rcu_head *rcu);
|
|
+void free_rcu_backbone_gw(struct rcu_head *rcu);
|
|
|
|
#endif /* < KERNEL_VERSION(3, 0, 0) */
|
|
|
|
diff --git a/gateway_client.c b/gateway_client.c
|
|
index 619fb73..df5631e 100644
|
|
--- a/gateway_client.c
|
|
+++ b/gateway_client.c
|
|
@@ -25,6 +25,7 @@
|
|
#include "gateway_common.h"
|
|
#include "hard-interface.h"
|
|
#include "originator.h"
|
|
+#include "translation-table.h"
|
|
#include "routing.h"
|
|
#include <linux/ip.h>
|
|
#include <linux/ipv6.h>
|
|
@@ -395,7 +396,7 @@ void gw_node_purge(struct bat_priv *bat_priv)
|
|
{
|
|
struct gw_node *gw_node, *curr_gw;
|
|
struct hlist_node *node, *node_tmp;
|
|
- unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
|
|
+ unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT);
|
|
int do_deselect = 0;
|
|
|
|
curr_gw = gw_get_selected_gw_node(bat_priv);
|
|
@@ -572,108 +573,142 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
|
|
- struct orig_node *old_gw)
|
|
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
|
|
{
|
|
struct ethhdr *ethhdr;
|
|
struct iphdr *iphdr;
|
|
struct ipv6hdr *ipv6hdr;
|
|
struct udphdr *udphdr;
|
|
- struct gw_node *curr_gw;
|
|
- struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
|
|
- unsigned int header_len = 0;
|
|
- int ret = 1;
|
|
-
|
|
- if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
|
|
- return 0;
|
|
|
|
/* check for ethernet header */
|
|
- if (!pskb_may_pull(skb, header_len + ETH_HLEN))
|
|
- return 0;
|
|
+ if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
|
|
+ return false;
|
|
ethhdr = (struct ethhdr *)skb->data;
|
|
- header_len += ETH_HLEN;
|
|
+ *header_len += ETH_HLEN;
|
|
|
|
/* check for initial vlan header */
|
|
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
|
|
- if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
|
|
- return 0;
|
|
+ if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
|
|
+ return false;
|
|
ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
|
|
- header_len += VLAN_HLEN;
|
|
+ *header_len += VLAN_HLEN;
|
|
}
|
|
|
|
/* check for ip header */
|
|
switch (ntohs(ethhdr->h_proto)) {
|
|
case ETH_P_IP:
|
|
- if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
|
|
- return 0;
|
|
- iphdr = (struct iphdr *)(skb->data + header_len);
|
|
- header_len += iphdr->ihl * 4;
|
|
+ if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
|
|
+ return false;
|
|
+ iphdr = (struct iphdr *)(skb->data + *header_len);
|
|
+ *header_len += iphdr->ihl * 4;
|
|
|
|
/* check for udp header */
|
|
if (iphdr->protocol != IPPROTO_UDP)
|
|
- return 0;
|
|
+ return false;
|
|
|
|
break;
|
|
case ETH_P_IPV6:
|
|
- if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
|
|
- return 0;
|
|
- ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
|
|
- header_len += sizeof(*ipv6hdr);
|
|
+ if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
|
|
+ return false;
|
|
+ ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
|
|
+ *header_len += sizeof(*ipv6hdr);
|
|
|
|
/* check for udp header */
|
|
if (ipv6hdr->nexthdr != IPPROTO_UDP)
|
|
- return 0;
|
|
+ return false;
|
|
|
|
break;
|
|
default:
|
|
- return 0;
|
|
+ return false;
|
|
}
|
|
|
|
- if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
|
|
- return 0;
|
|
- udphdr = (struct udphdr *)(skb->data + header_len);
|
|
- header_len += sizeof(*udphdr);
|
|
+ if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
|
|
+ return false;
|
|
+ udphdr = (struct udphdr *)(skb->data + *header_len);
|
|
+ *header_len += sizeof(*udphdr);
|
|
|
|
/* check for bootp port */
|
|
if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
|
|
(ntohs(udphdr->dest) != 67))
|
|
- return 0;
|
|
+ return false;
|
|
|
|
if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
|
|
(ntohs(udphdr->dest) != 547))
|
|
- return 0;
|
|
+ return false;
|
|
|
|
- if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
|
|
- return -1;
|
|
+ return true;
|
|
+}
|
|
|
|
- curr_gw = gw_get_selected_gw_node(bat_priv);
|
|
- if (!curr_gw)
|
|
- return 0;
|
|
-
|
|
- /* If old_gw != NULL then this packet is unicast.
|
|
- * So, at this point we have to check the message type: if it is a
|
|
- * DHCPREQUEST we have to decide whether to drop it or not */
|
|
- if (old_gw && curr_gw->orig_node != old_gw) {
|
|
- if (is_type_dhcprequest(skb, header_len)) {
|
|
- /* If the dhcp packet has been sent to a different gw,
|
|
- * we have to evaluate whether the old gw is still
|
|
- * reliable enough */
|
|
- neigh_curr = find_router(bat_priv, curr_gw->orig_node,
|
|
- NULL);
|
|
- neigh_old = find_router(bat_priv, old_gw, NULL);
|
|
- if (!neigh_curr || !neigh_old)
|
|
- goto free_neigh;
|
|
- if (neigh_curr->tq_avg - neigh_old->tq_avg <
|
|
- GW_THRESHOLD)
|
|
- ret = -1;
|
|
- }
|
|
+bool gw_out_of_range(struct bat_priv *bat_priv,
|
|
+ struct sk_buff *skb, struct ethhdr *ethhdr)
|
|
+{
|
|
+ struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
|
|
+ struct orig_node *orig_dst_node = NULL;
|
|
+ struct gw_node *curr_gw = NULL;
|
|
+ bool ret, out_of_range = false;
|
|
+ unsigned int header_len = 0;
|
|
+ uint8_t curr_tq_avg;
|
|
+
|
|
+ ret = gw_is_dhcp_target(skb, &header_len);
|
|
+ if (!ret)
|
|
+ goto out;
|
|
+
|
|
+ orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
|
|
+ ethhdr->h_dest);
|
|
+ if (!orig_dst_node)
|
|
+ goto out;
|
|
+
|
|
+ if (!orig_dst_node->gw_flags)
|
|
+ goto out;
|
|
+
|
|
+ ret = is_type_dhcprequest(skb, header_len);
|
|
+ if (!ret)
|
|
+ goto out;
|
|
+
|
|
+ switch (atomic_read(&bat_priv->gw_mode)) {
|
|
+ case GW_MODE_SERVER:
|
|
+ /* If we are a GW then we are our best GW. We can artificially
|
|
+ * set the tq towards ourself as the maximum value */
|
|
+ curr_tq_avg = TQ_MAX_VALUE;
|
|
+ break;
|
|
+ case GW_MODE_CLIENT:
|
|
+ curr_gw = gw_get_selected_gw_node(bat_priv);
|
|
+ if (!curr_gw)
|
|
+ goto out;
|
|
+
|
|
+ /* packet is going to our gateway */
|
|
+ if (curr_gw->orig_node == orig_dst_node)
|
|
+ goto out;
|
|
+
|
|
+ /* If the dhcp packet has been sent to a different gw,
|
|
+ * we have to evaluate whether the old gw is still
|
|
+ * reliable enough */
|
|
+ neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
|
|
+ if (!neigh_curr)
|
|
+ goto out;
|
|
+
|
|
+ curr_tq_avg = neigh_curr->tq_avg;
|
|
+ break;
|
|
+ case GW_MODE_OFF:
|
|
+ default:
|
|
+ goto out;
|
|
}
|
|
-free_neigh:
|
|
+
|
|
+ neigh_old = find_router(bat_priv, orig_dst_node, NULL);
|
|
+ if (!neigh_old)
|
|
+ goto out;
|
|
+
|
|
+ if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
|
|
+ out_of_range = true;
|
|
+
|
|
+out:
|
|
+ if (orig_dst_node)
|
|
+ orig_node_free_ref(orig_dst_node);
|
|
+ if (curr_gw)
|
|
+ gw_node_free_ref(curr_gw);
|
|
if (neigh_old)
|
|
neigh_node_free_ref(neigh_old);
|
|
if (neigh_curr)
|
|
neigh_node_free_ref(neigh_curr);
|
|
- if (curr_gw)
|
|
- gw_node_free_ref(curr_gw);
|
|
- return ret;
|
|
+ return out_of_range;
|
|
}
|
|
diff --git a/gateway_client.h b/gateway_client.h
|
|
index b9b983c..e1edba0 100644
|
|
--- a/gateway_client.h
|
|
+++ b/gateway_client.h
|
|
@@ -31,7 +31,8 @@ void gw_node_update(struct bat_priv *bat_priv,
|
|
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
|
|
void gw_node_purge(struct bat_priv *bat_priv);
|
|
int gw_client_seq_print_text(struct seq_file *seq, void *offset);
|
|
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
|
|
- struct orig_node *old_gw);
|
|
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
|
|
+bool gw_out_of_range(struct bat_priv *bat_priv,
|
|
+ struct sk_buff *skb, struct ethhdr *ethhdr);
|
|
|
|
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
|
|
diff --git a/gateway_common.c b/gateway_common.c
|
|
index 18661af..c4ac7b0 100644
|
|
--- a/gateway_common.c
|
|
+++ b/gateway_common.c
|
|
@@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
|
|
*tmp_ptr = '\0';
|
|
}
|
|
|
|
- ret = strict_strtol(buff, 10, &ldown);
|
|
+ ret = kstrtol(buff, 10, &ldown);
|
|
if (ret) {
|
|
bat_err(net_dev,
|
|
"Download speed of gateway mode invalid: %s\n",
|
|
@@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
|
|
*tmp_ptr = '\0';
|
|
}
|
|
|
|
- ret = strict_strtol(slash_ptr + 1, 10, &lup);
|
|
+ ret = kstrtol(slash_ptr + 1, 10, &lup);
|
|
if (ret) {
|
|
bat_err(net_dev,
|
|
"Upload speed of gateway mode invalid: "
|
|
diff --git a/gen-compat-autoconf.sh b/gen-compat-autoconf.sh
|
|
new file mode 100755
|
|
index 0000000..7cf621b
|
|
--- /dev/null
|
|
+++ b/gen-compat-autoconf.sh
|
|
@@ -0,0 +1,43 @@
|
|
+#! /bin/sh
|
|
+
|
|
+set -e
|
|
+
|
|
+TARGET=${1:="compat-autoconf.h"}
|
|
+TMP="${TARGET}.tmp"
|
|
+
|
|
+echo -n > "${TMP}"
|
|
+
|
|
+gen_config() {
|
|
+ KEY="${1}"
|
|
+ VALUE="${2}"
|
|
+
|
|
+ echo "#undef ${KEY}"
|
|
+ echo "#undef __enabled_${KEY}"
|
|
+ echo "#undef __enabled_${KEY}_MODULE"
|
|
+ case "${VALUE}" in
|
|
+ y)
|
|
+ echo "#define ${KEY} 1"
|
|
+ echo "#define __enabled_${KEY} 1"
|
|
+ echo "#define __enabled_${KEY}_MODULE 0"
|
|
+ ;;
|
|
+ m)
|
|
+ echo "#define ${KEY} 1"
|
|
+ echo "#define __enabled_${KEY} 0"
|
|
+ echo "#define __enabled_${KEY}_MODULE 1"
|
|
+ ;;
|
|
+ n)
|
|
+ echo "#define __enabled_${KEY} 0"
|
|
+ echo "#define __enabled_${KEY}_MODULE 0"
|
|
+ ;;
|
|
+ *)
|
|
+ echo "#define ${KEY} \"${VALUE}\""
|
|
+ ;;
|
|
+ esac
|
|
+}
|
|
+
|
|
+# write config variables
|
|
+gen_config 'CONFIG_BATMAN_ADV_DEBUG' ${CONFIG_BATMAN_ADV_DEBUG:="n"} >> "${TMP}"
|
|
+gen_config 'CONFIG_BATMAN_ADV_BLA' ${CONFIG_BATMAN_ADV_BLA:="y"} >> "${TMP}"
|
|
+
|
|
+# only regenerate compat-autoconf.h when config was changed
|
|
+diff "${TMP}" "${TARGET}" > /dev/null 2>&1 || cp "${TMP}" "${TARGET}"
|
|
diff --git a/hard-interface.c b/hard-interface.c
|
|
index 7704df4..cc13363 100644
|
|
--- a/hard-interface.c
|
|
+++ b/hard-interface.c
|
|
@@ -28,7 +28,7 @@
|
|
#include "bat_sysfs.h"
|
|
#include "originator.h"
|
|
#include "hash.h"
|
|
-#include "bat_ogm.h"
|
|
+#include "bridge_loop_avoidance.h"
|
|
|
|
#include <linux/if_arp.h>
|
|
|
|
@@ -108,7 +108,8 @@ out:
|
|
return hard_iface;
|
|
}
|
|
|
|
-static void primary_if_update_addr(struct bat_priv *bat_priv)
|
|
+static void primary_if_update_addr(struct bat_priv *bat_priv,
|
|
+ struct hard_iface *oldif)
|
|
{
|
|
struct vis_packet *vis_packet;
|
|
struct hard_iface *primary_if;
|
|
@@ -123,6 +124,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv)
|
|
memcpy(vis_packet->sender_orig,
|
|
primary_if->net_dev->dev_addr, ETH_ALEN);
|
|
|
|
+ bla_update_orig_address(bat_priv, primary_if, oldif);
|
|
out:
|
|
if (primary_if)
|
|
hardif_free_ref(primary_if);
|
|
@@ -141,14 +143,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
|
|
curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
|
|
rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
|
|
|
|
- if (curr_hard_iface)
|
|
- hardif_free_ref(curr_hard_iface);
|
|
-
|
|
if (!new_hard_iface)
|
|
- return;
|
|
+ goto out;
|
|
+
|
|
+ bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
|
|
+ primary_if_update_addr(bat_priv, curr_hard_iface);
|
|
|
|
- bat_ogm_init_primary(new_hard_iface);
|
|
- primary_if_update_addr(bat_priv);
|
|
+out:
|
|
+ if (curr_hard_iface)
|
|
+ hardif_free_ref(curr_hard_iface);
|
|
}
|
|
|
|
static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
|
|
@@ -233,7 +236,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
|
|
|
|
bat_priv = netdev_priv(hard_iface->soft_iface);
|
|
|
|
- bat_ogm_update_mac(hard_iface);
|
|
+ bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
|
|
hard_iface->if_status = IF_TO_BE_ACTIVATED;
|
|
|
|
/**
|
|
@@ -281,6 +284,14 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
|
|
if (!atomic_inc_not_zero(&hard_iface->refcount))
|
|
goto out;
|
|
|
|
+ /* hard-interface is part of a bridge */
|
|
+ if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
|
|
+ pr_err("You are about to enable batman-adv on '%s' which "
|
|
+ "already is part of a bridge. Unless you know exactly "
|
|
+ "what you are doing this is probably wrong and won't "
|
|
+ "work the way you think it would.\n",
|
|
+ hard_iface->net_dev->name);
|
|
+
|
|
soft_iface = dev_get_by_name(&init_net, iface_name);
|
|
|
|
if (!soft_iface) {
|
|
@@ -307,7 +318,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
|
|
hard_iface->soft_iface = soft_iface;
|
|
bat_priv = netdev_priv(hard_iface->soft_iface);
|
|
|
|
- bat_ogm_init(hard_iface);
|
|
+ bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
|
|
|
|
if (!hard_iface->packet_buff) {
|
|
bat_err(hard_iface->soft_iface, "Can't add interface packet "
|
|
@@ -527,15 +538,16 @@ static int hard_if_event(struct notifier_block *this,
|
|
goto hardif_put;
|
|
|
|
check_known_mac_addr(hard_iface->net_dev);
|
|
- bat_ogm_update_mac(hard_iface);
|
|
|
|
bat_priv = netdev_priv(hard_iface->soft_iface);
|
|
+ bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
|
|
+
|
|
primary_if = primary_if_get_selected(bat_priv);
|
|
if (!primary_if)
|
|
goto hardif_put;
|
|
|
|
if (hard_iface == primary_if)
|
|
- primary_if_update_addr(bat_priv);
|
|
+ primary_if_update_addr(bat_priv, NULL);
|
|
break;
|
|
default:
|
|
break;
|
|
@@ -590,17 +602,17 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
|
|
|
|
batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
|
|
|
|
- if (batman_ogm_packet->version != COMPAT_VERSION) {
|
|
+ if (batman_ogm_packet->header.version != COMPAT_VERSION) {
|
|
bat_dbg(DBG_BATMAN, bat_priv,
|
|
"Drop packet: incompatible batman version (%i)\n",
|
|
- batman_ogm_packet->version);
|
|
+ batman_ogm_packet->header.version);
|
|
goto err_free;
|
|
}
|
|
|
|
/* all receive handlers return whether they received or reused
|
|
* the supplied skb. if not, we have to free the skb. */
|
|
|
|
- switch (batman_ogm_packet->packet_type) {
|
|
+ switch (batman_ogm_packet->header.packet_type) {
|
|
/* batman originator packet */
|
|
case BAT_OGM:
|
|
ret = recv_bat_ogm_packet(skb, hard_iface);
|
|
diff --git a/hash.c b/hash.c
|
|
index 2a17250..d1da29d 100644
|
|
--- a/hash.c
|
|
+++ b/hash.c
|
|
@@ -25,7 +25,7 @@
|
|
/* clears the hash */
|
|
static void hash_init(struct hashtable_t *hash)
|
|
{
|
|
- int i;
|
|
+ uint32_t i;
|
|
|
|
for (i = 0 ; i < hash->size; i++) {
|
|
INIT_HLIST_HEAD(&hash->table[i]);
|
|
@@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash)
|
|
}
|
|
|
|
/* allocates and clears the hash */
|
|
-struct hashtable_t *hash_new(int size)
|
|
+struct hashtable_t *hash_new(uint32_t size)
|
|
{
|
|
struct hashtable_t *hash;
|
|
|
|
diff --git a/hash.h b/hash.h
|
|
index d20aa71..4768717 100644
|
|
--- a/hash.h
|
|
+++ b/hash.h
|
|
@@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
|
|
/* the hashfunction, should return an index
|
|
* based on the key in the data of the first
|
|
* argument and the size the second */
|
|
-typedef int (*hashdata_choose_cb)(const void *, int);
|
|
+typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
|
|
typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
|
|
|
|
struct hashtable_t {
|
|
struct hlist_head *table; /* the hashtable itself with the buckets */
|
|
spinlock_t *list_locks; /* spinlock for each hash list entry */
|
|
- int size; /* size of hashtable */
|
|
+ uint32_t size; /* size of hashtable */
|
|
};
|
|
|
|
/* allocates and clears the hash */
|
|
-struct hashtable_t *hash_new(int size);
|
|
+struct hashtable_t *hash_new(uint32_t size);
|
|
|
|
/* free only the hashtable and the hash itself. */
|
|
void hash_destroy(struct hashtable_t *hash);
|
|
@@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash,
|
|
struct hlist_head *head;
|
|
struct hlist_node *node, *node_tmp;
|
|
spinlock_t *list_lock; /* spinlock to protect write access */
|
|
- int i;
|
|
+ uint32_t i;
|
|
|
|
for (i = 0; i < hash->size; i++) {
|
|
head = &hash->table[i];
|
|
@@ -93,7 +93,8 @@ static inline int hash_add(struct hashtable_t *hash,
|
|
hashdata_choose_cb choose,
|
|
const void *data, struct hlist_node *data_node)
|
|
{
|
|
- int index, ret = -1;
|
|
+ uint32_t index;
|
|
+ int ret = -1;
|
|
struct hlist_head *head;
|
|
struct hlist_node *node;
|
|
spinlock_t *list_lock; /* spinlock to protect write access */
|
|
@@ -137,7 +138,7 @@ static inline void *hash_remove(struct hashtable_t *hash,
|
|
hashdata_compare_cb compare,
|
|
hashdata_choose_cb choose, void *data)
|
|
{
|
|
- size_t index;
|
|
+ uint32_t index;
|
|
struct hlist_node *node;
|
|
struct hlist_head *head;
|
|
void *data_save = NULL;
|
|
diff --git a/icmp_socket.c b/icmp_socket.c
index ac3520e..5d69e10 100644
--- a/icmp_socket.c
+++ b/icmp_socket.c
@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,

spin_unlock_bh(&socket_client->lock);

- error = __copy_to_user(buf, &socket_packet->icmp_packet,
- socket_packet->icmp_len);
+ packet_len = min(count, socket_packet->icmp_len);
+ error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);

- packet_len = socket_packet->icmp_len;
kfree(socket_packet);

if (error)
@@ -187,17 +186,12 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
skb_reserve(skb, sizeof(struct ethhdr));
icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);

- if (!access_ok(VERIFY_READ, buff, packet_len)) {
+ if (copy_from_user(icmp_packet, buff, packet_len)) {
len = -EFAULT;
goto free_skb;
}

- if (__copy_from_user(icmp_packet, buff, packet_len)) {
- len = -EFAULT;
- goto free_skb;
- }
-
- if (icmp_packet->packet_type != BAT_ICMP) {
+ if (icmp_packet->header.packet_type != BAT_ICMP) {
bat_dbg(DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: "
"got bogus packet type (expected: BAT_ICMP)\n");
@@ -215,9 +209,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,

icmp_packet->uid = socket_client->index;

- if (icmp_packet->version != COMPAT_VERSION) {
+ if (icmp_packet->header.version != COMPAT_VERSION) {
icmp_packet->msg_type = PARAMETER_PROBLEM;
- icmp_packet->ttl = COMPAT_VERSION;
+ icmp_packet->header.version = COMPAT_VERSION;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
goto free_skb;
}
diff --git a/main.c b/main.c
index fb87bdc..6df246f 100644
--- a/main.c
+++ b/main.c
@@ -30,13 +30,17 @@
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
+#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
+#include "bat_algo.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked */
struct list_head hardif_list;
+char bat_routing_algo[20] = "BATMAN IV";
+static struct hlist_head bat_algo_list;

unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

@@ -45,6 +49,9 @@ struct workqueue_struct *bat_event_workqueue;
static int __init batman_init(void)
{
INIT_LIST_HEAD(&hardif_list);
+ INIT_HLIST_HEAD(&bat_algo_list);
+
+ bat_iv_init();

/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
@@ -90,13 +97,10 @@ int mesh_init(struct net_device *soft_iface)
spin_lock_init(&bat_priv->gw_list_lock);
spin_lock_init(&bat_priv->vis_hash_lock);
spin_lock_init(&bat_priv->vis_list_lock);
- spin_lock_init(&bat_priv->softif_neigh_lock);
- spin_lock_init(&bat_priv->softif_neigh_vid_lock);

INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
INIT_HLIST_HEAD(&bat_priv->gw_list);
- INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
INIT_LIST_HEAD(&bat_priv->tt_changes_list);
INIT_LIST_HEAD(&bat_priv->tt_req_list);
INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -112,6 +116,9 @@ int mesh_init(struct net_device *soft_iface)
if (vis_init(bat_priv) < 1)
goto err;

+ if (bla_init(bat_priv) < 1)
+ goto err;
+
atomic_set(&bat_priv->gw_reselect, 0);
atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
goto end;
@@ -139,7 +146,7 @@ void mesh_free(struct net_device *soft_iface)

tt_free(bat_priv);

- softif_neigh_purge(bat_priv);
+ bla_free(bat_priv);

atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
@@ -170,9 +177,110 @@ int is_my_mac(const uint8_t *addr)
}
rcu_read_unlock();
return 0;
+}
+
+static struct bat_algo_ops *bat_algo_get(char *name)
+{
+ struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
+ struct hlist_node *node;
+
+ hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
+ if (strcmp(bat_algo_ops_tmp->name, name) != 0)
+ continue;
+
+ bat_algo_ops = bat_algo_ops_tmp;
+ break;
+ }
+
+ return bat_algo_ops;
+}
+
+int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
+{
+ struct bat_algo_ops *bat_algo_ops_tmp;
+ int ret = -1;
+
+ bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
+ if (bat_algo_ops_tmp) {
+ pr_info("Trying to register already registered routing "
+ "algorithm: %s\n", bat_algo_ops->name);
+ goto out;
+ }
+
+ /* all algorithms must implement all ops (for now) */
+ if (!bat_algo_ops->bat_ogm_init ||
+ !bat_algo_ops->bat_ogm_init_primary ||
+ !bat_algo_ops->bat_ogm_update_mac ||
+ !bat_algo_ops->bat_ogm_schedule ||
+ !bat_algo_ops->bat_ogm_emit ||
+ !bat_algo_ops->bat_ogm_receive) {
+ pr_info("Routing algo '%s' does not implement required ops\n",
+ bat_algo_ops->name);
+ goto out;
+ }
+
+ INIT_HLIST_NODE(&bat_algo_ops->list);
+ hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int bat_algo_select(struct bat_priv *bat_priv, char *name)
+{
+ struct bat_algo_ops *bat_algo_ops;
+ int ret = -1;
+
+ bat_algo_ops = bat_algo_get(name);
+ if (!bat_algo_ops)
+ goto out;
+
+ bat_priv->bat_algo_ops = bat_algo_ops;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct bat_algo_ops *bat_algo_ops;
+ struct hlist_node *node;
+
+ seq_printf(seq, "Available routing algorithms:\n");
+
+ hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
+ seq_printf(seq, "%s\n", bat_algo_ops->name);
+ }
+
+ return 0;
+}
+
+static int param_set_ra(const char *val, const struct kernel_param *kp)
+{
+ struct bat_algo_ops *bat_algo_ops;
+
+ bat_algo_ops = bat_algo_get((char *)val);
+ if (!bat_algo_ops) {
+ pr_err("Routing algorithm '%s' is not supported\n", val);
+ return -EINVAL;
+ }

+ return param_set_copystring(val, kp);
}

+static const struct kernel_param_ops param_ops_ra = {
+ .set = param_set_ra,
+ .get = param_get_string,
+};
+
+static struct kparam_string __param_string_ra = {
+ .maxlen = sizeof(bat_routing_algo),
+ .string = bat_routing_algo,
+};
+
+module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
module_init(batman_init);
module_exit(batman_exit);

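bat_algo_register() in the hunk above is a classic ops-table registry: reject a duplicate name, reject a table with missing hooks, then link it into a global list that bat_algo_select() searches by name. A compilable user-space sketch of the same pattern; algo_ops, algo_get and algo_register are illustrative names, not the kernel structures from this patch.

#include <stddef.h>
#include <string.h>

struct algo_ops {
	const char *name;
	void (*schedule)(void);	/* stands in for the bat_ogm_* hooks */
	struct algo_ops *next;
};

static struct algo_ops *algo_list;

static struct algo_ops *algo_get(const char *name)
{
	struct algo_ops *ops;

	for (ops = algo_list; ops; ops = ops->next)
		if (strcmp(ops->name, name) == 0)
			return ops;
	return NULL;
}

static int algo_register(struct algo_ops *ops)
{
	/* same policy as above: no duplicates, all hooks mandatory */
	if (algo_get(ops->name) || !ops->schedule)
		return -1;
	ops->next = algo_list;
	algo_list = ops;
	return 0;
}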
diff --git a/main.h b/main.h
index 53e5d9f..8128f59 100644
--- a/main.h
+++ b/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"

#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.4.0"
+#define SOURCE_VERSION "2012.0.0"
#endif

/* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@

/* purge originators after time in seconds if no valid packet comes in
* -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
-#define PURGE_TIMEOUT 200
-#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
-#define TT_CLIENT_ROAM_TIMEOUT 600
+#define PURGE_TIMEOUT 200000 /* 200 seconds */
+#define TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size) */
#define TQ_LOCAL_WINDOW_SIZE 64
-#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
+#define TT_REQUEST_TIMEOUT 3000 /* milliseconds we have to keep
+ * pending tt_req */

#define TQ_GLOBAL_WINDOW_SIZE 5
#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
@@ -56,8 +57,8 @@

#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */

-#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most
- * ROAMING_MAX_COUNT times */
+#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most
+ * ROAMING_MAX_COUNT times, in milliseconds */
#define ROAMING_MAX_COUNT 5

#define NO_FLAGS 0
@@ -79,8 +80,12 @@
#define MAX_AGGREGATION_BYTES 512
#define MAX_AGGREGATION_MS 100

-#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
+#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
+#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
+#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)

+#define DUPLIST_SIZE 16
+#define DUPLIST_TIMEOUT 500 /* 500 ms */
/* don't reset again within 30 seconds */
#define RESET_PROTECTION_MS 30000
#define EXPECTED_SEQNO_RANGE 65536
@@ -120,7 +125,8 @@ enum dbg_level {
DBG_BATMAN = 1 << 0,
DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
DBG_TT = 1 << 2, /* translation table operations */
- DBG_ALL = 7
+ DBG_BLA = 1 << 3, /* bridge loop avoidance */
+ DBG_ALL = 15
};


@@ -149,6 +155,7 @@ enum dbg_level {

#include "types.h"

+extern char bat_routing_algo[];
extern struct list_head hardif_list;

extern unsigned char broadcast_addr[];
@@ -159,6 +166,9 @@ void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
int is_my_mac(const uint8_t *addr);
+int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
+int bat_algo_select(struct bat_priv *bat_priv, char *name);
+int bat_algo_seq_print_text(struct seq_file *seq, void *offset);

#ifdef CONFIG_BATMAN_ADV_DEBUG
int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
@@ -204,6 +214,17 @@ static inline int compare_eth(const void *data1, const void *data2)
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

+/**
+ * has_timed_out - compares current time (jiffies) and timestamp + timeout
+ * @timestamp: base value to compare with (in jiffies)
+ * @timeout: added to base value before comparing (in milliseconds)
+ *
+ * Returns true if current time is after timestamp + timeout
+ */
+static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
+{
+ return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
+}

#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)

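With the constants above now uniformly in milliseconds, has_timed_out() becomes the single place where milliseconds meet jiffies. A rough user-space analogue, assuming a monotonic millisecond clock as a stand-in for jiffies; note that the kernel helper additionally survives jiffies wraparound via time_is_before_jiffies().

#include <stdbool.h>
#include <time.h>

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* true once timeout milliseconds have passed since timestamp_ms */
static bool has_timed_out(unsigned long timestamp_ms, unsigned int timeout)
{
	return now_ms() > timestamp_ms + timeout;
}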
diff --git a/originator.c b/originator.c
index 0e5b772..f2c2a6d 100644
--- a/originator.c
+++ b/originator.c
@@ -28,6 +28,7 @@
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
+#include "bridge_loop_avoidance.h"

static void purge_orig(struct work_struct *work);

@@ -164,7 +165,7 @@ void originator_free(struct bat_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
- int i;
+ uint32_t i;

if (!hash)
return;
@@ -219,6 +220,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
/* extra reference for return */
atomic_set(&orig_node->refcount, 2);

+ orig_node->tt_initialised = false;
orig_node->tt_poss_change = false;
orig_node->bat_priv = bat_priv;
memcpy(orig_node->orig, addr, ETH_ALEN);
@@ -281,8 +283,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {

- if ((time_after(jiffies,
- neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
+ if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) ||
(neigh_node->if_incoming->if_status == IF_INACTIVE) ||
(neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
(neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
@@ -326,9 +327,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
{
struct neigh_node *best_neigh_node;

- if (time_after(jiffies,
- orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
-
+ if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) {
bat_dbg(DBG_BATMAN, bat_priv,
"Originator timeout: originator %pM, last_valid %lu\n",
orig_node->orig, (orig_node->last_valid / HZ));
@@ -350,7 +349,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
- int i;
+ uint32_t i;

if (!hash)
return;
@@ -371,8 +370,8 @@ static void _purge_orig(struct bat_priv *bat_priv)
continue;
}

- if (time_after(jiffies, orig_node->last_frag_packet +
- msecs_to_jiffies(FRAG_TIMEOUT)))
+ if (has_timed_out(orig_node->last_frag_packet,
+ FRAG_TIMEOUT))
frag_list_free(&orig_node->frag_list);
}
spin_unlock_bh(list_lock);
@@ -380,8 +379,6 @@ static void _purge_orig(struct bat_priv *bat_priv)

gw_node_purge(bat_priv);
gw_election(bat_priv);
-
- softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
@@ -413,7 +410,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
int batman_count = 0;
int last_seen_secs;
int last_seen_msecs;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;

primary_if = primary_if_get_selected(bat_priv);

@@ -519,7 +517,8 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
- int i, ret;
+ uint32_t i;
+ int ret;

/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
@@ -601,7 +600,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
struct hlist_head *head;
struct hard_iface *hard_iface_tmp;
struct orig_node *orig_node;
- int i, ret;
+ uint32_t i;
+ int ret;

/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
diff --git a/originator.h b/originator.h
index cfc1f60..67765ff 100644
--- a/originator.h
+++ b/originator.h
@@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);

/* hashfunction to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(const void *data, int32_t size)
+static inline uint32_t choose_orig(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
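The choose_orig() hunk above keeps only the prologue; the cited Wikipedia page describes the classic one-at-a-time hash. A standalone sketch of a body consistent with that reference and the visible prologue; the elided lines in the real file may differ in detail.

#include <stdint.h>

static uint32_t choose_orig(const void *data, uint32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	uint32_t i;

	for (i = 0; i < 6; i++) {	/* 6 == ETH_ALEN, one MAC address */
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}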
diff --git a/packet.h b/packet.h
index 4d9e54c..91c4663 100644
--- a/packet.h
+++ b/packet.h
@@ -90,10 +90,30 @@ enum tt_client_flags {
TT_CLIENT_PENDING = 1 << 10
};

-struct batman_ogm_packet {
+/* claim frame types for the bridge loop avoidance */
+enum bla_claimframe {
+ CLAIM_TYPE_ADD = 0x00,
+ CLAIM_TYPE_DEL = 0x01,
+ CLAIM_TYPE_ANNOUNCE = 0x02,
+ CLAIM_TYPE_REQUEST = 0x03
+};
+
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id */
+struct bla_claim_dst {
+ uint8_t magic[3]; /* FF:43:05 */
+ uint8_t type; /* bla_claimframe */
+ uint16_t group; /* group id */
+} __packed;
+
+struct batman_header {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t ttl;
+} __packed;
+
+struct batman_ogm_packet {
+ struct batman_header header;
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
uint32_t seqno;
uint8_t orig[6];
@@ -108,9 +128,7 @@ struct batman_ogm_packet {
#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)

struct icmp_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[6];
uint8_t orig[6];
@@ -124,9 +142,7 @@ struct icmp_packet {
/* icmp_packet_rr must start with all fields from icmp_packet
* as this is assumed by code that handles ICMP packets */
struct icmp_packet_rr {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[6];
uint8_t orig[6];
@@ -137,17 +153,13 @@ struct icmp_packet_rr {
} __packed;

struct unicast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[6];
} __packed;

struct unicast_frag_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[6];
uint8_t flags;
@@ -157,18 +169,14 @@ struct unicast_frag_packet {
} __packed;

struct bcast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t reserved;
uint32_t seqno;
uint8_t orig[6];
} __packed;

struct vis_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl; /* TTL */
+ struct batman_header header;
uint8_t vis_type; /* which type of vis-participant sent this? */
uint32_t seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
@@ -179,9 +187,7 @@ struct vis_packet {
} __packed;

struct tt_query_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
/* the flag field is a combination of:
* - TT_REQUEST or TT_RESPONSE
* - TT_FULL_TABLE */
@@ -202,9 +208,7 @@ struct tt_query_packet {
} __packed;

struct roam_adv_packet {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
+ struct batman_header header;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
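The bla_claim_dst layout introduced above packs a 3-byte magic, a claim type and a 16-bit group id into the six bytes of an ARP destination hardware address. A standalone sketch of building one; the struct is redeclared here for illustration, and the byte order of the group id is an assumption.

#include <stdint.h>
#include <string.h>

struct bla_claim_dst_sketch {
	uint8_t magic[3];	/* FF:43:05 */
	uint8_t type;		/* bla_claimframe */
	uint16_t group;		/* group id */
} __attribute__((packed));

_Static_assert(sizeof(struct bla_claim_dst_sketch) == 6,
	       "must fit exactly into a MAC address");

static void build_claim_dst(uint8_t mac[6], uint8_t type, uint16_t group)
{
	struct bla_claim_dst_sketch dst = {
		.magic = { 0xff, 0x43, 0x05 },
		.type = type,
		.group = group,	/* assumed host byte order here */
	};

	memcpy(mac, &dst, sizeof(dst));
}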
diff --git a/routing.c b/routing.c
index f961cc5..6a576aa 100644
--- a/routing.c
+++ b/routing.c
@@ -29,7 +29,7 @@
#include "originator.h"
#include "vis.h"
#include "unicast.h"
-#include "bat_ogm.h"
+#include "bridge_loop_avoidance.h"

void slide_own_bcast_window(struct hard_iface *hard_iface)
{
@@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
struct hlist_head *head;
struct orig_node *orig_node;
unsigned long *word;
- int i;
+ uint32_t i;
size_t word_index;

for (i = 0; i < hash->size; i++) {
@@ -232,8 +232,7 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
{
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- if (time_after(jiffies, *last_reset +
- msecs_to_jiffies(RESET_PROTECTION_MS))) {
+ if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {

*last_reset = jiffies;
bat_dbg(DBG_BATMAN, bat_priv,
@@ -248,6 +247,7 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,

int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct ethhdr *ethhdr;

/* drop packet if it has not necessary minimum size */
@@ -272,9 +272,7 @@ int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
if (skb_linearize(skb) < 0)
return NET_RX_DROP;

- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
+ bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);

kfree_skb(skb);
return NET_RX_SUCCESS;
@@ -320,7 +318,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
- icmp_packet->ttl = TTL;
+ icmp_packet->header.ttl = TTL;

send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
@@ -376,7 +374,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
- icmp_packet->ttl = TTL;
+ icmp_packet->header.ttl = TTL;

send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
@@ -441,7 +439,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
return recv_my_icmp_packet(bat_priv, skb, hdr_size);

/* TTL exceeded */
- if (icmp_packet->ttl < 2)
+ if (icmp_packet->header.ttl < 2)
return recv_icmp_ttl_exceeded(bat_priv, skb);

/* get routing information */
@@ -460,7 +458,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
icmp_packet = (struct icmp_packet_rr *)skb->data;

/* decrement ttl */
- icmp_packet->ttl--;
+ icmp_packet->header.ttl--;

/* route it */
send_skb_packet(skb, router->if_incoming, router->addr);
@@ -578,6 +576,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct tt_query_packet *tt_query;
+ uint16_t tt_len;
struct ethhdr *ethhdr;

/* drop packet if it has not necessary minimum size */
@@ -616,13 +615,21 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
}
break;
case TT_RESPONSE:
- /* packet needs to be linearized to access the TT changes */
- if (skb_linearize(skb) < 0)
- goto out;
+ if (is_my_mac(tt_query->dst)) {
+ /* packet needs to be linearized to access the TT
+ * changes */
+ if (skb_linearize(skb) < 0)
+ goto out;
+
+ tt_len = tt_query->tt_data * sizeof(struct tt_change);
+
+ /* Ensure we have all the claimed data */
+ if (unlikely(skb_headlen(skb) <
+ sizeof(struct tt_query_packet) + tt_len))
+ goto out;

- if (is_my_mac(tt_query->dst))
handle_tt_response(bat_priv, tt_query);
- else {
+ } else {
bat_dbg(DBG_TT, bat_priv,
"Routing TT_RESPONSE to %pM [%c]\n",
tt_query->dst,
@@ -664,6 +671,12 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
if (!is_my_mac(roam_adv_packet->dst))
return route_unicast_packet(skb, recv_if);

+ /* check if it is a backbone gateway. we don't accept
+ * roaming advertisement from it, as it has the same
+ * entries as we have. */
+ if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+ goto out;
+
orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
if (!orig_node)
goto out;
@@ -806,7 +819,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
unicast_packet = (struct unicast_packet *)skb->data;

/* TTL exceeded */
- if (unicast_packet->ttl < 2) {
+ if (unicast_packet->header.ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to "
"%pM: ttl exceeded\n", ethhdr->h_source,
unicast_packet->dest);
@@ -831,7 +844,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)

unicast_packet = (struct unicast_packet *)skb->data;

- if (unicast_packet->packet_type == BAT_UNICAST &&
+ if (unicast_packet->header.packet_type == BAT_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
ret = frag_send_skb(skb, bat_priv,
@@ -839,7 +852,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
}

- if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
+ if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
@@ -858,7 +871,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}

/* decrement ttl */
- unicast_packet->ttl--;
+ unicast_packet->header.ttl--;

/* route it */
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -1032,7 +1045,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_my_mac(bcast_packet->orig))
goto out;

- if (bcast_packet->ttl < 2)
+ if (bcast_packet->header.ttl < 2)
goto out;

orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
@@ -1061,9 +1074,18 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)

spin_unlock_bh(&orig_node->bcast_seqno_lock);

+ /* check whether this has been sent by another originator before */
+ if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+ goto out;
+
/* rebroadcast packet */
add_bcast_packet_to_list(bat_priv, skb, 1);

+ /* don't hand the broadcast up if it is from an originator
+ * from the same backbone. */
+ if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+ goto out;
+
/* broadcast for me */
interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
ret = NET_RX_SUCCESS;
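The TT_RESPONSE hunk above adds a length guard: a response header may claim more tt_data entries than the skb really carries, so the implied length is recomputed and checked against skb_headlen() before handle_tt_response() runs. The same guard as a standalone sketch over a flat buffer; the entry layout here is illustrative, not the real struct tt_change.

#include <stdint.h>
#include <stddef.h>

struct tt_change_sketch {
	uint8_t flags;
	uint8_t addr[6];
};

/* reject a response whose claimed entries do not fit the data held */
static int tt_response_sane(size_t headlen, size_t hdr_len,
			    uint16_t tt_data)
{
	size_t tt_len = (size_t)tt_data * sizeof(struct tt_change_sketch);

	return headlen >= hdr_len + tt_len;
}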
diff --git a/send.c b/send.c
index 8a684eb..019337e 100644
--- a/send.c
+++ b/send.c
@@ -28,7 +28,6 @@
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
-#include "bat_ogm.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

@@ -168,7 +167,7 @@ void schedule_bat_ogm(struct hard_iface *hard_iface)
if (primary_if)
hardif_free_ref(primary_if);

- bat_ogm_schedule(hard_iface, tt_num_changes);
+ bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}

static void forw_packet_free(struct forw_packet *forw_packet)
@@ -234,7 +233,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,

/* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct bcast_packet *)newskb->data;
- bcast_packet->ttl--;
+ bcast_packet->header.ttl--;

skb_reset_mac_header(newskb);

@@ -318,7 +317,7 @@ void send_outstanding_bat_ogm_packet(struct work_struct *work)
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;

- bat_ogm_emit(forw_packet);
+ bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

/**
* we have to have at least one packet in the queue
diff --git a/soft-interface.c b/soft-interface.c
index 652fb7b..d3f520f 100644
--- a/soft-interface.c
+++ b/soft-interface.c
@@ -36,6 +36,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "unicast.h"
+#include "bridge_loop_avoidance.h"


static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,440 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
return 0;
}

-static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
-{
- if (atomic_dec_and_test(&softif_neigh->refcount))
- kfree_rcu(softif_neigh, rcu);
-}
-
-static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh;
- struct hlist_node *node, *node_tmp;
- struct bat_priv *bat_priv;
-
- softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
- bat_priv = softif_neigh_vid->bat_priv;
-
- spin_lock_bh(&bat_priv->softif_neigh_lock);
- hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
- &softif_neigh_vid->softif_neigh_list, list) {
- hlist_del_rcu(&softif_neigh->list);
- softif_neigh_free_ref(softif_neigh);
- }
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
- kfree(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
-{
- if (atomic_dec_and_test(&softif_neigh_vid->refcount))
- call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
-}
-
-static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
- short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct hlist_node *node;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh_vid, node,
- &bat_priv->softif_neigh_vids, list) {
- if (softif_neigh_vid->vid != vid)
- continue;
-
- if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
- continue;
-
- goto out;
- }
-
- softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
- if (!softif_neigh_vid)
- goto out;
-
- softif_neigh_vid->vid = vid;
- softif_neigh_vid->bat_priv = bat_priv;
-
- /* initialize with 2 - caller decrements counter by one */
- atomic_set(&softif_neigh_vid->refcount, 2);
- INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
- INIT_HLIST_NODE(&softif_neigh_vid->list);
- spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
- hlist_add_head_rcu(&softif_neigh_vid->list,
- &bat_priv->softif_neigh_vids);
- spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-out:
- rcu_read_unlock();
- return softif_neigh_vid;
-}
-
-static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
- const uint8_t *addr, short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh = NULL;
- struct hlist_node *node;
-
- softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
- if (!softif_neigh_vid)
- goto out;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh, node,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- if (!compare_eth(softif_neigh->addr, addr))
- continue;
-
- if (!atomic_inc_not_zero(&softif_neigh->refcount))
- continue;
-
- softif_neigh->last_seen = jiffies;
- goto unlock;
- }
-
- softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
- if (!softif_neigh)
- goto unlock;
-
- memcpy(softif_neigh->addr, addr, ETH_ALEN);
- softif_neigh->last_seen = jiffies;
- /* initialize with 2 - caller decrements counter by one */
- atomic_set(&softif_neigh->refcount, 2);
-
- INIT_HLIST_NODE(&softif_neigh->list);
- spin_lock_bh(&bat_priv->softif_neigh_lock);
- hlist_add_head_rcu(&softif_neigh->list,
- &softif_neigh_vid->softif_neigh_list);
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-unlock:
- rcu_read_unlock();
-out:
- if (softif_neigh_vid)
- softif_neigh_vid_free_ref(softif_neigh_vid);
- return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_get_selected(
- struct softif_neigh_vid *softif_neigh_vid)
-{
- struct softif_neigh *softif_neigh;
-
- rcu_read_lock();
- softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
- if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
- softif_neigh = NULL;
-
- rcu_read_unlock();
- return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_vid_get_selected(
- struct bat_priv *bat_priv,
- short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh = NULL;
-
- softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
- if (!softif_neigh_vid)
- goto out;
-
- softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-out:
- if (softif_neigh_vid)
- softif_neigh_vid_free_ref(softif_neigh_vid);
- return softif_neigh;
-}
-
-static void softif_neigh_vid_select(struct bat_priv *bat_priv,
- struct softif_neigh *new_neigh,
- short vid)
-{
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *curr_neigh;
-
- softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
- if (!softif_neigh_vid)
- goto out;
-
- spin_lock_bh(&bat_priv->softif_neigh_lock);
-
- if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
- new_neigh = NULL;
-
- curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
- 1);
- rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
-
- if ((curr_neigh) && (!new_neigh))
- bat_dbg(DBG_ROUTES, bat_priv,
- "Removing mesh exit point on vid: %d (prev: %pM).\n",
- vid, curr_neigh->addr);
- else if ((curr_neigh) && (new_neigh))
- bat_dbg(DBG_ROUTES, bat_priv,
- "Changing mesh exit point on vid: %d from %pM "
- "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr);
- else if ((!curr_neigh) && (new_neigh))
- bat_dbg(DBG_ROUTES, bat_priv,
- "Setting mesh exit point on vid: %d to %pM.\n",
- vid, new_neigh->addr);
-
- if (curr_neigh)
- softif_neigh_free_ref(curr_neigh);
-
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-out:
- if (softif_neigh_vid)
- softif_neigh_vid_free_ref(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
- struct softif_neigh_vid *softif_neigh_vid)
-{
- struct softif_neigh *curr_neigh;
- struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
- struct hard_iface *primary_if = NULL;
- struct hlist_node *node;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* find new softif_neigh immediately to avoid temporary loops */
- rcu_read_lock();
- curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
- hlist_for_each_entry_rcu(softif_neigh_tmp, node,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- if (softif_neigh_tmp == curr_neigh)
- continue;
-
- /* we got a neighbor but its mac is 'bigger' than ours */
- if (memcmp(primary_if->net_dev->dev_addr,
- softif_neigh_tmp->addr, ETH_ALEN) < 0)
- continue;
-
- if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
- continue;
-
- softif_neigh = softif_neigh_tmp;
- goto unlock;
- }
-
-unlock:
- rcu_read_unlock();
-out:
- softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
-
- if (primary_if)
- hardif_free_ref(primary_if);
- if (softif_neigh)
- softif_neigh_free_ref(softif_neigh);
-}
-
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct softif_neigh_vid *softif_neigh_vid;
- struct softif_neigh *softif_neigh;
- struct hard_iface *primary_if;
- struct hlist_node *node, *node_tmp;
- struct softif_neigh *curr_softif_neigh;
- int ret = 0, last_seen_secs, last_seen_msecs;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if) {
- ret = seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
- net_dev->name);
- goto out;
- }
-
- if (primary_if->if_status != IF_ACTIVE) {
- ret = seq_printf(seq, "BATMAN mesh %s "
- "disabled - primary interface not active\n",
- net_dev->name);
- goto out;
- }
-
- seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh_vid, node,
- &bat_priv->softif_neigh_vids, list) {
- seq_printf(seq, " %-15s %s on vid: %d\n",
- "Originator", "last-seen", softif_neigh_vid->vid);
-
- curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-
- hlist_for_each_entry_rcu(softif_neigh, node_tmp,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- last_seen_secs = jiffies_to_msecs(jiffies -
- softif_neigh->last_seen) / 1000;
- last_seen_msecs = jiffies_to_msecs(jiffies -
- softif_neigh->last_seen) % 1000;
- seq_printf(seq, "%s %pM %3i.%03is\n",
- curr_softif_neigh == softif_neigh
- ? "=>" : " ", softif_neigh->addr,
- last_seen_secs, last_seen_msecs);
- }
-
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
-
- seq_printf(seq, "\n");
- }
- rcu_read_unlock();
-
-out:
- if (primary_if)
- hardif_free_ref(primary_if);
- return ret;
-}
-
-void softif_neigh_purge(struct bat_priv *bat_priv)
-{
- struct softif_neigh *softif_neigh, *curr_softif_neigh;
- struct softif_neigh_vid *softif_neigh_vid;
- struct hlist_node *node, *node_tmp, *node_tmp2;
- int do_deselect;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(softif_neigh_vid, node,
- &bat_priv->softif_neigh_vids, list) {
- if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
- continue;
-
- curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
- do_deselect = 0;
-
- spin_lock_bh(&bat_priv->softif_neigh_lock);
- hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
- &softif_neigh_vid->softif_neigh_list,
- list) {
- if ((!time_after(jiffies, softif_neigh->last_seen +
- msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
- (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
- continue;
-
- if (curr_softif_neigh == softif_neigh) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Current mesh exit point on vid: %d "
- "'%pM' vanished.\n",
- softif_neigh_vid->vid,
- softif_neigh->addr);
- do_deselect = 1;
- }
-
- hlist_del_rcu(&softif_neigh->list);
- softif_neigh_free_ref(softif_neigh);
- }
- spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
- /* soft_neigh_vid_deselect() needs to acquire the
- * softif_neigh_lock */
- if (do_deselect)
- softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
-
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
-
- softif_neigh_vid_free_ref(softif_neigh_vid);
- }
- rcu_read_unlock();
-
- spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
- hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
- &bat_priv->softif_neigh_vids, list) {
- if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
- continue;
-
- hlist_del_rcu(&softif_neigh_vid->list);
- softif_neigh_vid_free_ref(softif_neigh_vid);
- }
- spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-}
-
-static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
- short vid)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct batman_ogm_packet *batman_ogm_packet;
- struct softif_neigh *softif_neigh = NULL;
- struct hard_iface *primary_if = NULL;
- struct softif_neigh *curr_softif_neigh = NULL;
-
- if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
- batman_ogm_packet = (struct batman_ogm_packet *)
- (skb->data + ETH_HLEN + VLAN_HLEN);
- else
- batman_ogm_packet = (struct batman_ogm_packet *)
- (skb->data + ETH_HLEN);
-
- if (batman_ogm_packet->version != COMPAT_VERSION)
- goto out;
-
- if (batman_ogm_packet->packet_type != BAT_OGM)
- goto out;
-
- if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
- goto out;
-
- if (is_my_mac(batman_ogm_packet->orig))
- goto out;
-
- softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
- if (!softif_neigh)
- goto out;
-
- curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
- if (curr_softif_neigh == softif_neigh)
- goto out;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* we got a neighbor but its mac is 'bigger' than ours */
- if (memcmp(primary_if->net_dev->dev_addr,
- softif_neigh->addr, ETH_ALEN) < 0)
- goto out;
-
- /* close own batX device and use softif_neigh as exit node */
- if (!curr_softif_neigh) {
- softif_neigh_vid_select(bat_priv, softif_neigh, vid);
- goto out;
- }
-
- /* switch to new 'smallest neighbor' */
- if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
- softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-
-out:
- kfree_skb(skb);
- if (softif_neigh)
- softif_neigh_free_ref(softif_neigh);
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
- if (primary_if)
- hardif_free_ref(primary_if);
- return;
-}
-
static int interface_open(struct net_device *dev)
{
netif_start_queue(dev);
@@ -562,11 +129,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
struct hard_iface *primary_if = NULL;
struct bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr;
- struct softif_neigh *curr_softif_neigh = NULL;
- struct orig_node *orig_node = NULL;
+ uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ unsigned int header_len = 0;
int data_len = skb->len, ret;
short vid = -1;
- bool do_bcast;
+ bool do_bcast = false;

if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
@@ -583,32 +150,42 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)

/* fall through */
case ETH_P_BATMAN:
- softif_batman_recv(skb, soft_iface, vid);
- goto end;
+ goto dropped;
}

- /**
- * if we have a another chosen mesh exit node in range
- * it will transport the packets to the mesh
- */
- curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
- if (curr_softif_neigh)
+ if (bla_tx(bat_priv, skb, vid))
goto dropped;

/* Register the client MAC in the transtable */
tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);

- orig_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
- do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
- if (do_bcast || (orig_node && orig_node->gw_flags)) {
- ret = gw_is_target(bat_priv, skb, orig_node);
+ /* don't accept stp packets. STP does not help in meshes.
+ * better use the bridge loop avoidance ... */
+ if (compare_eth(ethhdr->h_dest, stp_addr))
+ goto dropped;

- if (ret < 0)
- goto dropped;
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ do_bcast = true;

- if (ret)
- do_bcast = false;
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case GW_MODE_SERVER:
+ /* gateway servers should not send dhcp
+ * requests into the mesh */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ goto dropped;
+ break;
+ case GW_MODE_CLIENT:
+ /* gateway clients should send dhcp requests
+ * via unicast to their gateway */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ do_bcast = false;
+ break;
+ case GW_MODE_OFF:
+ default:
+ break;
+ }
}

/* ethernet packet should be broadcasted */
@@ -621,11 +198,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
goto dropped;

bcast_packet = (struct bcast_packet *)skb->data;
- bcast_packet->version = COMPAT_VERSION;
- bcast_packet->ttl = TTL;
+ bcast_packet->header.version = COMPAT_VERSION;
+ bcast_packet->header.ttl = TTL;

/* batman packet type: broadcast */
- bcast_packet->packet_type = BAT_BCAST;
+ bcast_packet->header.packet_type = BAT_BCAST;

/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
@@ -644,6 +221,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)

/* unicast packet */
} else {
+ if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+ ret = gw_out_of_range(bat_priv, skb, ethhdr);
+ if (ret)
+ goto dropped;
+ }
+
ret = unicast_send_skb(skb, bat_priv);
if (ret != 0)
goto dropped_freed;
@@ -658,12 +241,8 @@ dropped:
dropped_freed:
bat_priv->stats.tx_dropped++;
end:
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
if (primary_if)
hardif_free_ref(primary_if);
- if (orig_node)
- orig_node_free_ref(orig_node);
return NETDEV_TX_OK;
}

@@ -672,12 +251,9 @@ void interface_rx(struct net_device *soft_iface,
int hdr_size)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct unicast_packet *unicast_packet;
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
- struct softif_neigh *curr_softif_neigh = NULL;
short vid = -1;
- int ret;

/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -701,30 +277,6 @@ void interface_rx(struct net_device *soft_iface,
goto dropped;
}

- /**
- * if we have a another chosen mesh exit node in range
- * it will transport the packets to the non-mesh network
- */
- curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
- if (curr_softif_neigh) {
- skb_push(skb, hdr_size);
- unicast_packet = (struct unicast_packet *)skb->data;
-
- if ((unicast_packet->packet_type != BAT_UNICAST) &&
- (unicast_packet->packet_type != BAT_UNICAST_FRAG))
- goto dropped;
-
- skb_reset_mac_header(skb);
-
- memcpy(unicast_packet->dest,
- curr_softif_neigh->addr, ETH_ALEN);
- ret = route_unicast_packet(skb, recv_if);
- if (ret == NET_RX_DROP)
- goto dropped;
-
- goto out;
- }
-
/* skb->dev & skb->pkt_type are set here */
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
goto dropped;
@@ -744,14 +296,17 @@ void interface_rx(struct net_device *soft_iface,
if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;

+ /* Let the bridge loop avoidance check the packet. If it will
+ * not handle it, we can safely push it up. */
+ if (bla_rx(bat_priv, skb, vid))
+ goto out;
+
netif_rx(skb);
goto out;

dropped:
kfree_skb(skb);
out:
- if (curr_softif_neigh)
- softif_neigh_free_ref(curr_softif_neigh);
return;
}

@@ -815,6 +370,7 @@ struct net_device *softif_create(const char *name)

atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
+ atomic_set(&bat_priv->bridge_loop_avoidance, 0);
atomic_set(&bat_priv->ap_isolation, 0);
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
@@ -832,6 +388,7 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->ttvn, 0);
atomic_set(&bat_priv->tt_local_changes, 0);
atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+ atomic_set(&bat_priv->bla_num_requests, 0);

bat_priv->tt_buff = NULL;
bat_priv->tt_buff_len = 0;
@@ -840,6 +397,10 @@ struct net_device *softif_create(const char *name)
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;

+ ret = bat_algo_select(bat_priv, bat_routing_algo);
+ if (ret < 0)
+ goto unreg_soft_iface;
+
ret = sysfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_soft_iface;
@@ -859,7 +420,7 @@ unreg_debugfs:
unreg_sysfs:
sysfs_del_meshif(soft_iface);
unreg_soft_iface:
- unregister_netdev(soft_iface);
+ unregister_netdevice(soft_iface);
return NULL;

free_soft_iface:
diff --git a/soft-interface.h b/soft-interface.h
index 001546f..694f0f6 100644
--- a/soft-interface.h
+++ b/soft-interface.h
@@ -23,8 +23,6 @@
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_

int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
-void softif_neigh_purge(struct bat_priv *bat_priv);
void interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct hard_iface *recv_if,
int hdr_size);
diff --git a/sysfs-class-net-mesh b/sysfs-class-net-mesh
index b020014..c81fe89 100644
--- a/sysfs-class-net-mesh
+++ b/sysfs-class-net-mesh
@@ -14,6 +14,15 @@ Description:
mesh will be sent using multiple interfaces at the
same time (if available).

+What: /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
+Date: November 2011
+Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Description:
+ Indicates whether the bridge loop avoidance feature
+ is enabled. This feature detects and avoids loops
+ between the mesh and devices bridged with the soft
+ interface <mesh_iface>.
+
What: /sys/class/net/<mesh_iface>/mesh/fragmentation
Date: October 2010
Contact: Andreas Langer <an.langer@gmx.de>
@@ -65,6 +74,13 @@ Description:
Defines the penalty which will be applied to an
originator message's tq-field on every hop.

+What: /sys/class/net/<mesh_iface>/mesh/routing_algo
+Date: December 2011
+Contact: Marek Lindner <lindner_marek@yahoo.de>
+Description:
+ Defines the routing protocol this mesh instance
+ uses to find the optimal paths through the mesh.
+
What: /sys/class/net/<mesh_iface>/mesh/vis_mode
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
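Both new attributes live under the per-interface mesh/ directory this file documents. A minimal user-space reader for routing_algo; the interface name bat0 is an assumption, not mandated by this patch.

#include <stdio.h>

int main(void)
{
	char algo[32];
	FILE *f = fopen("/sys/class/net/bat0/mesh/routing_algo", "r");

	if (!f)
		return 1;
	if (fgets(algo, sizeof(algo), f))
		printf("bat0 routing algorithm: %s", algo);
	fclose(f);
	return 0;
}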
diff --git a/translation-table.c b/translation-table.c
|
|
index c7aafc7..12463d2 100644
|
|
--- a/translation-table.c
|
|
+++ b/translation-table.c
|
|
@@ -27,27 +27,17 @@
|
|
#include "hash.h"
|
|
#include "originator.h"
|
|
#include "routing.h"
|
|
+#include "bridge_loop_avoidance.h"
|
|
|
|
#include <linux/crc16.h>
|
|
|
|
-static void _tt_global_del(struct bat_priv *bat_priv,
|
|
- struct tt_global_entry *tt_global_entry,
|
|
- const char *message);
|
|
static void tt_purge(struct work_struct *work);
|
|
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
|
|
|
|
/* returns 1 if they are the same mac addr */
|
|
-static int compare_ltt(const struct hlist_node *node, const void *data2)
|
|
+static int compare_tt(const struct hlist_node *node, const void *data2)
|
|
{
|
|
- const void *data1 = container_of(node, struct tt_local_entry,
|
|
- hash_entry);
|
|
-
|
|
- return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
|
|
-}
|
|
-
|
|
-/* returns 1 if they are the same mac addr */
|
|
-static int compare_gtt(const struct hlist_node *node, const void *data2)
|
|
-{
|
|
- const void *data1 = container_of(node, struct tt_global_entry,
|
|
+ const void *data1 = container_of(node, struct tt_common_entry,
|
|
hash_entry);
|
|
|
|
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
|
|
@@ -60,14 +50,13 @@ static void tt_start_timer(struct bat_priv *bat_priv)
|
|
msecs_to_jiffies(5000));
|
|
}
|
|
|
|
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
|
|
- const void *data)
|
|
+static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
|
|
+ const void *data)
|
|
{
|
|
- struct hashtable_t *hash = bat_priv->tt_local_hash;
|
|
struct hlist_head *head;
|
|
struct hlist_node *node;
|
|
- struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
|
|
- int index;
|
|
+ struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
|
|
+ uint32_t index;
|
|
|
|
if (!hash)
|
|
return NULL;
|
|
@@ -76,83 +65,88 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
|
|
head = &hash->table[index];
|
|
|
|
rcu_read_lock();
|
|
- hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
|
|
- if (!compare_eth(tt_local_entry, data))
|
|
+ hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
|
|
+ if (!compare_eth(tt_common_entry, data))
|
|
continue;
|
|
|
|
- if (!atomic_inc_not_zero(&tt_local_entry->refcount))
|
|
+ if (!atomic_inc_not_zero(&tt_common_entry->refcount))
|
|
continue;
|
|
|
|
- tt_local_entry_tmp = tt_local_entry;
|
|
+ tt_common_entry_tmp = tt_common_entry;
|
|
break;
|
|
}
|
|
rcu_read_unlock();
|
|
|
|
- return tt_local_entry_tmp;
|
|
+ return tt_common_entry_tmp;
|
|
}
|
|
|
|
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
|
|
- const void *data)
|
|
+static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
|
|
+ const void *data)
|
|
{
|
|
- struct hashtable_t *hash = bat_priv->tt_global_hash;
|
|
- struct hlist_head *head;
|
|
- struct hlist_node *node;
|
|
- struct tt_global_entry *tt_global_entry;
|
|
- struct tt_global_entry *tt_global_entry_tmp = NULL;
|
|
- int index;
|
|
-
|
|
- if (!hash)
|
|
- return NULL;
|
|
-
|
|
- index = choose_orig(data, hash->size);
|
|
- head = &hash->table[index];
|
|
-
|
|
- rcu_read_lock();
|
|
- hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
|
|
- if (!compare_eth(tt_global_entry, data))
|
|
- continue;
|
|
-
|
|
- if (!atomic_inc_not_zero(&tt_global_entry->refcount))
|
|
- continue;
|
|
-
|
|
- tt_global_entry_tmp = tt_global_entry;
|
|
- break;
|
|
- }
|
|
- rcu_read_unlock();
|
|
+ struct tt_common_entry *tt_common_entry;
|
|
+ struct tt_local_entry *tt_local_entry = NULL;
|
|
|
|
- return tt_global_entry_tmp;
|
|
+ tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
|
|
+ if (tt_common_entry)
|
|
+ tt_local_entry = container_of(tt_common_entry,
|
|
+ struct tt_local_entry, common);
|
|
+ return tt_local_entry;
|
|
}
|
|
|
|
-static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
|
|
+static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
|
|
+ const void *data)
|
|
{
|
|
- unsigned long deadline;
|
|
- deadline = starting_time + msecs_to_jiffies(timeout);
|
|
+ struct tt_common_entry *tt_common_entry;
|
|
+ struct tt_global_entry *tt_global_entry = NULL;
|
|
+
|
|
+ tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
|
|
+ if (tt_common_entry)
|
|
+ tt_global_entry = container_of(tt_common_entry,
|
|
+ struct tt_global_entry, common);
|
|
+ return tt_global_entry;
|
|
|
|
- return time_after(jiffies, deadline);
|
|
}
|
|
|
|
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
|
|
{
|
|
- if (atomic_dec_and_test(&tt_local_entry->refcount))
|
|
- kfree_rcu(tt_local_entry, rcu);
|
|
+ if (atomic_dec_and_test(&tt_local_entry->common.refcount))
|
|
+ kfree_rcu(tt_local_entry, common.rcu);
|
|
}
|
|
|
|
static void tt_global_entry_free_rcu(struct rcu_head *rcu)
|
|
{
|
|
+ struct tt_common_entry *tt_common_entry;
|
|
struct tt_global_entry *tt_global_entry;
|
|
|
|
- tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
|
|
-
|
|
- if (tt_global_entry->orig_node)
|
|
- orig_node_free_ref(tt_global_entry->orig_node);
|
|
+ tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
|
|
+ tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
|
|
+ common);
|
|
|
|
kfree(tt_global_entry);
|
|
}
|
|
|
|
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
|
|
{
|
|
- if (atomic_dec_and_test(&tt_global_entry->refcount))
|
|
- call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
|
|
+ if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
|
|
+ tt_global_del_orig_list(tt_global_entry);
|
|
+ call_rcu(&tt_global_entry->common.rcu,
|
|
+ tt_global_entry_free_rcu);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
|
|
+{
|
|
+ struct tt_orig_list_entry *orig_entry;
|
|
+
|
|
+ orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
|
|
+ atomic_dec(&orig_entry->orig_node->tt_size);
|
|
+ orig_node_free_ref(orig_entry->orig_node);
|
|
+ kfree(orig_entry);
|
|
+}
|
|
+
|
|
+static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
|
|
+{
|
|
+ call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
|
|
}

static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -201,6 +195,10 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct tt_local_entry *tt_local_entry = NULL;
struct tt_global_entry *tt_global_entry = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
+ int hash_added;

tt_local_entry = tt_local_hash_find(bat_priv, addr);

@@ -217,39 +215,54 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
(uint8_t)atomic_read(&bat_priv->ttvn));

- memcpy(tt_local_entry->addr, addr, ETH_ALEN);
- tt_local_entry->last_seen = jiffies;
- tt_local_entry->flags = NO_FLAGS;
+ memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
+ tt_local_entry->common.flags = NO_FLAGS;
if (is_wifi_iface(ifindex))
- tt_local_entry->flags |= TT_CLIENT_WIFI;
- atomic_set(&tt_local_entry->refcount, 2);
+ tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+ atomic_set(&tt_local_entry->common.refcount, 2);
+ tt_local_entry->last_seen = jiffies;

/* the batman interface mac address should never be purged */
if (compare_eth(addr, soft_iface->dev_addr))
- tt_local_entry->flags |= TT_CLIENT_NOPURGE;
+ tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

- tt_local_event(bat_priv, addr, tt_local_entry->flags);
+ hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
+ &tt_local_entry->common,
+ &tt_local_entry->common.hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ tt_local_entry_free_ref(tt_local_entry);
+ goto out;
+ }
+
+ tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

/* The local entry has to be marked as NEW to avoid to send it in
* a full table response going out before the next ttvn increment
* (consistency check) */
- tt_local_entry->flags |= TT_CLIENT_NEW;
-
- hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
- tt_local_entry, &tt_local_entry->hash_entry);
+ tt_local_entry->common.flags |= TT_CLIENT_NEW;

/* remove address from global hash if present */
tt_global_entry = tt_global_hash_find(bat_priv, addr);

/* Check whether it is a roaming! */
if (tt_global_entry) {
- /* This node is probably going to update its tt table */
- tt_global_entry->orig_node->tt_poss_change = true;
- /* The global entry has to be marked as PENDING and has to be
- * kept for consistency purpose */
- tt_global_entry->flags |= TT_CLIENT_PENDING;
- send_roam_adv(bat_priv, tt_global_entry->addr,
- tt_global_entry->orig_node);
+ /* These nodes are probably going to update their tt tables */
+ head = &tt_global_entry->orig_list;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ orig_entry->orig_node->tt_poss_change = true;
+
+ send_roam_adv(bat_priv, tt_global_entry->common.addr,
+ orig_entry->orig_node);
+ }
+ rcu_read_unlock();
+ /* The global entry has to be marked as ROAMING and
+ * has to be kept for consistency purpose */
+
+ tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
}
out:
if (tt_local_entry)
@@ -310,13 +323,12 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
- size_t buf_size, pos;
- char *buff;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;

primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
@@ -337,51 +349,27 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
"announced via TT (TTVN: %u):\n",
net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- __hlist_for_each_rcu(node, head)
- buf_size += 29;
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- ret = -ENOMEM;
- goto out;
- }
-
- buff[0] = '\0';
- pos = 0;
-
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 30, " * %pM "
- "[%c%c%c%c%c]\n",
- tt_local_entry->addr,
- (tt_local_entry->flags &
+ seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+ tt_common_entry->addr,
+ (tt_common_entry->flags &
TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_NOPURGE ? 'P' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_NEW ? 'N' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_PENDING ? 'X' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
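
/*
 * The hunk above replaces the old two-pass printing scheme (walk the
 * hash once to guess a buffer size, kmalloc, snprintf into the buffer,
 * print, kfree) with direct seq_printf() calls; seq_file manages its
 * own buffer, so the size estimate and the GFP_ATOMIC allocation
 * disappear. A sketch of the resulting shape, reusing the
 * hashtable_t/tt_common_entry declarations from this patch:
 */
static int example_seq_print(struct seq_file *seq, struct hashtable_t *hash)
{
	struct tt_common_entry *entry;
	struct hlist_node *node;
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		rcu_read_lock();
		/* print each entry directly; no pre-sized scratch buffer */
		hlist_for_each_entry_rcu(entry, node, &hash->table[i],
					 hash_entry)
			seq_printf(seq, " * %pM\n", entry->addr);
		rcu_read_unlock();
	}
	return 0;
}
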
@@ -392,13 +380,13 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
struct tt_local_entry *tt_local_entry,
uint16_t flags)
{
- tt_local_event(bat_priv, tt_local_entry->addr,
- tt_local_entry->flags | flags);
+ tt_local_event(bat_priv, tt_local_entry->common.addr,
+ tt_local_entry->common.flags | flags);

/* The local client has to be marked as "pending to be removed" but has
* to be kept in the table in order to send it in a full table
* response issued before the net ttvn increment (consistency check) */
- tt_local_entry->flags |= TT_CLIENT_PENDING;
+ tt_local_entry->common.flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -414,7 +402,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
(roaming ? TT_CLIENT_ROAM : NO_FLAGS));

bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
- "%s\n", tt_local_entry->addr, message);
+ "%s\n", tt_local_entry->common.addr, message);
out:
if (tt_local_entry)
tt_local_entry_free_ref(tt_local_entry);
@@ -424,34 +412,38 @@ static void tt_local_purge(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->tt_local_hash;
struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
- if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
+ if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
continue;

/* entry already marked for deletion */
- if (tt_local_entry->flags & TT_CLIENT_PENDING)
+ if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
continue;

- if (!is_out_of_time(tt_local_entry->last_seen,
- TT_LOCAL_TIMEOUT * 1000))
+ if (!has_timed_out(tt_local_entry->last_seen,
+ TT_LOCAL_TIMEOUT))
continue;

tt_local_set_pending(bat_priv, tt_local_entry,
TT_CLIENT_DEL);
bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
"pending to be removed: timed out\n",
- tt_local_entry->addr);
+ tt_local_entry->common.addr);
}
spin_unlock_bh(list_lock);
}
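
/*
 * Every is_out_of_time(x, TIMEOUT * 1000) call site in this file is
 * converted to has_timed_out(x, TIMEOUT): the timeout constants are now
 * given in milliseconds and converted to jiffies once, inside the
 * helper, instead of being multiplied by 1000 at each caller.
 * Presumably the helper is an inline in main.h along these lines (a
 * sketch, not part of this hunk):
 */
static inline bool has_timed_out(unsigned long timestamp,
				 unsigned int timeout)
{
	return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
}
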
@@ -462,10 +454,11 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
{
struct hashtable_t *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct tt_common_entry *tt_common_entry;
struct tt_local_entry *tt_local_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- int i;
+ uint32_t i;

if (!bat_priv->tt_local_hash)
return;
@@ -477,9 +470,12 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
tt_local_entry_free_ref(tt_local_entry);
}
spin_unlock_bh(list_lock);
@@ -519,60 +515,105 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns 1 if found, 0 otherwise */
+static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
+ const struct orig_node *orig_node)
+{
+ struct tt_orig_list_entry *tmp_orig_entry;
+ const struct hlist_head *head;
+ struct hlist_node *node;
+ bool found = false;
+
+ rcu_read_lock();
+ head = &entry->orig_list;
+ hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+ if (tmp_orig_entry->orig_node == orig_node) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
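
/*
 * With this change a global client is no longer tied to one originator:
 * each tt_global_entry keeps an RCU-protected orig_list with one
 * tt_orig_list_entry per announcing originator, and membership checks
 * become list walks like the one above. A sketch of a reader, e.g.
 * counting the announcers (hypothetical helper, for illustration):
 */
static int tt_global_count_orig(const struct tt_global_entry *entry)
{
	struct tt_orig_list_entry *orig_entry;
	struct hlist_node *node;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, node, &entry->orig_list, list)
		count++;
	rcu_read_unlock();

	return count;
}
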
+
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
bool wifi)
{
- struct tt_global_entry *tt_global_entry;
- struct orig_node *orig_node_tmp;
+ struct tt_global_entry *tt_global_entry = NULL;
+ struct tt_orig_list_entry *orig_entry = NULL;
int ret = 0;
+ int hash_added;

tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

if (!tt_global_entry) {
- tt_global_entry =
- kmalloc(sizeof(*tt_global_entry),
- GFP_ATOMIC);
+ tt_global_entry = kzalloc(sizeof(*tt_global_entry),
+ GFP_ATOMIC);
if (!tt_global_entry)
goto out;
+ orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+ if (!orig_entry) {
+ kfree(tt_global_entry);
+ tt_global_entry = NULL;
+ goto out;
+ }

- memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
- /* Assign the new orig_node */
- atomic_inc(&orig_node->refcount);
- tt_global_entry->orig_node = orig_node;
- tt_global_entry->ttvn = ttvn;
- tt_global_entry->flags = NO_FLAGS;
+ memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+
+ tt_global_entry->common.flags = NO_FLAGS;
tt_global_entry->roam_at = 0;
- atomic_set(&tt_global_entry->refcount, 2);
+ atomic_set(&tt_global_entry->common.refcount, 2);

- hash_add(bat_priv->tt_global_hash, compare_gtt,
- choose_orig, tt_global_entry,
- &tt_global_entry->hash_entry);
- atomic_inc(&orig_node->tt_size);
- } else {
- if (tt_global_entry->orig_node != orig_node) {
- atomic_dec(&tt_global_entry->orig_node->tt_size);
- orig_node_tmp = tt_global_entry->orig_node;
- atomic_inc(&orig_node->refcount);
- tt_global_entry->orig_node = orig_node;
- orig_node_free_ref(orig_node_tmp);
- atomic_inc(&orig_node->tt_size);
+ INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+ spin_lock_init(&tt_global_entry->list_lock);
+
+ hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
+ choose_orig, &tt_global_entry->common,
+ &tt_global_entry->common.hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ tt_global_entry_free_ref(tt_global_entry);
+ goto out_remove;
}
- tt_global_entry->ttvn = ttvn;
- tt_global_entry->flags = NO_FLAGS;
+ } else {
+ if (tt_global_entry_has_orig(tt_global_entry, orig_node))
+ /* already in the list, no need to add it again */
+ orig_entry = NULL;
+ else
+ orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+
+ tt_global_entry->common.flags = NO_FLAGS;
tt_global_entry->roam_at = 0;
}

+ /* new orig_entry needs to be added */
+ if (orig_entry) {
+ INIT_HLIST_NODE(&orig_entry->list);
+ atomic_inc(&orig_node->refcount);
+ atomic_inc(&orig_node->tt_size);
+ orig_entry->orig_node = orig_node;
+ orig_entry->ttvn = ttvn;
+
+ spin_lock_bh(&tt_global_entry->list_lock);
+ hlist_add_head_rcu(&orig_entry->list,
+ &tt_global_entry->orig_list);
+ spin_unlock_bh(&tt_global_entry->list_lock);
+ }

if (wifi)
- tt_global_entry->flags |= TT_CLIENT_WIFI;
+ tt_global_entry->common.flags |= TT_CLIENT_WIFI;

bat_dbg(DBG_TT, bat_priv,
"Creating new global tt entry: %pM (via %pM)\n",
- tt_global_entry->addr, orig_node->orig);
+ tt_global_entry->common.addr, orig_node->orig);

+out_remove:
/* remove address from local hash if present */
- tt_local_remove(bat_priv, tt_global_entry->addr,
+ tt_local_remove(bat_priv, tt_global_entry->common.addr,
"global tt received", roaming);
ret = 1;
|
out:
|
|
@@ -581,18 +622,49 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
+/*
+ * print all orig nodes who announce the address for this global entry.
+ * it is assumed that the caller holds rcu_read_lock();
+ */
+static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
+ struct seq_file *seq)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
+ struct tt_common_entry *tt_common_entry;
+ uint16_t flags;
+ uint8_t last_ttvn;
+
+ tt_common_entry = &tt_global_entry->common;
+
+ head = &tt_global_entry->orig_list;
+
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ flags = tt_common_entry->flags;
+ last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
+ seq_printf(seq, " * %pM (%3u) via %pM (%3u) "
+ "[%c%c%c]\n", tt_global_entry->common.addr,
+ orig_entry->ttvn,
+ orig_entry->orig_node->orig, last_ttvn,
+ (flags & TT_CLIENT_ROAM ? 'R' : '.'),
+ (flags & TT_CLIENT_PENDING ? 'X' : '.'),
+ (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+ }
+}
+
int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
- size_t buf_size, pos;
- char *buff;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;

primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
@@ -615,108 +687,125 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, " %-13s %s %-15s %s %s\n",
"Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
- * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

rcu_read_lock();
- __hlist_for_each_rcu(node, head)
- buf_size += 67;
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- ret = -ENOMEM;
- goto out;
- }
-
- buff[0] = '\0';
- pos = 0;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_global_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 69,
- " * %pM (%3u) via %pM (%3u) "
- "[%c%c%c]\n", tt_global_entry->addr,
- tt_global_entry->ttvn,
- tt_global_entry->orig_node->orig,
- (uint8_t) atomic_read(
- &tt_global_entry->orig_node->
- last_ttvn),
- (tt_global_entry->flags &
- TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_global_entry->flags &
- TT_CLIENT_PENDING ? 'X' : '.'),
- (tt_global_entry->flags &
- TT_CLIENT_WIFI ? 'W' : '.'));
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
+ tt_global_print_entry(tt_global_entry, seq);
}
rcu_read_unlock();
}
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
return ret;
}

-static void _tt_global_del(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- const char *message)
+/* deletes the orig list of a tt_global_entry */
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
- if (!tt_global_entry)
- goto out;
+ struct hlist_head *head;
+ struct hlist_node *node, *safe;
+ struct tt_orig_list_entry *orig_entry;

- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM (via %pM): %s\n",
- tt_global_entry->addr, tt_global_entry->orig_node->orig,
- message);
+ spin_lock_bh(&tt_global_entry->list_lock);
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+ hlist_del_rcu(node);
+ tt_orig_list_entry_free_ref(orig_entry);
+ }
+ spin_unlock_bh(&tt_global_entry->list_lock);

- atomic_dec(&tt_global_entry->orig_node->tt_size);
+}

- hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
- tt_global_entry->addr);
-out:
- if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
+ struct tt_global_entry *tt_global_entry,
+ struct orig_node *orig_node,
+ const char *message)
+{
+ struct hlist_head *head;
+ struct hlist_node *node, *safe;
+ struct tt_orig_list_entry *orig_entry;
+
+ spin_lock_bh(&tt_global_entry->list_lock);
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+ if (orig_entry->orig_node == orig_node) {
+ bat_dbg(DBG_TT, bat_priv,
+ "Deleting %pM from global tt entry %pM: %s\n",
+ orig_node->orig, tt_global_entry->common.addr,
+ message);
+ hlist_del_rcu(node);
+ tt_orig_list_entry_free_ref(orig_entry);
+ }
+ }
+ spin_unlock_bh(&tt_global_entry->list_lock);
}

+
void tt_global_del(struct bat_priv *bat_priv,
struct orig_node *orig_node, const unsigned char *addr,
const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry = NULL;
+ struct tt_local_entry *tt_local_entry = NULL;

tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;

- if (tt_global_entry->orig_node == orig_node) {
- if (roaming) {
- tt_global_entry->flags |= TT_CLIENT_ROAM;
- tt_global_entry->roam_at = jiffies;
- goto out;
- }
- _tt_global_del(bat_priv, tt_global_entry, message);
+ if (!roaming)
+ goto out_del;
+
+ /* if we are deleting a global entry due to a roam
+ * event, there are two possibilities:
+ * 1) the client roamed from node A to node B => we mark
+ * it with TT_CLIENT_ROAM, we start a timer and we
+ * wait for node B to claim it. In case of timeout
+ * the entry is purged.
+ * 2) the client roamed to us => we can directly delete
+ * the global entry, since it is useless now. */
+ tt_local_entry = tt_local_hash_find(bat_priv,
+ tt_global_entry->common.addr);
+ if (!tt_local_entry) {
+ tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ goto out;
}

+
+out_del:
+ tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global_entry->orig_list)) {
+ bat_dbg(DBG_TT, bat_priv,
+ "Deleting global tt entry %pM (via %pM): %s\n",
+ tt_global_entry->common.addr, orig_node->orig,
+ message);
+
+ hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
+ tt_global_entry->common.addr);
+ tt_global_entry_free_ref(tt_global_entry);
+ }
+
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message)
{
struct tt_global_entry *tt_global_entry;
- int i;
+ struct tt_common_entry *tt_common_entry;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->tt_global_hash;
struct hlist_node *node, *safe;
struct hlist_head *head;
@@ -730,14 +819,20 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_global_entry, node, safe,
- head, hash_entry) {
- if (tt_global_entry->orig_node == orig_node) {
+ hlist_for_each_entry_safe(tt_common_entry, node, safe,
+ head, hash_entry) {
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
+
+ tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global_entry->orig_list)) {
bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM "
- "(via %pM): originator time out\n",
- tt_global_entry->addr,
- tt_global_entry->orig_node->orig);
+ "Deleting global tt entry %pM: %s\n",
+ tt_global_entry->common.addr,
+ message);
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
}
@@ -745,34 +840,39 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
spin_unlock_bh(list_lock);
}
atomic_set(&orig_node->tt_size, 0);
+ orig_node->tt_initialised = false;
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
- if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
+ if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
continue;
- if (!is_out_of_time(tt_global_entry->roam_at,
- TT_CLIENT_ROAM_TIMEOUT * 1000))
+ if (!has_timed_out(tt_global_entry->roam_at,
+ TT_CLIENT_ROAM_TIMEOUT))
continue;

bat_dbg(DBG_TT, bat_priv, "Deleting global "
"tt entry (%pM): Roaming timeout\n",
- tt_global_entry->addr);
- atomic_dec(&tt_global_entry->orig_node->tt_size);
+ tt_global_entry->common.addr);
+
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
}
@@ -785,10 +885,11 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
{
struct hashtable_t *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- int i;
+ uint32_t i;

if (!bat_priv->tt_global_hash)
return;
@@ -800,9 +901,12 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
tt_global_entry_free_ref(tt_global_entry);
}
spin_unlock_bh(list_lock);
@@ -818,8 +922,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
{
bool ret = false;

- if (tt_local_entry->flags & TT_CLIENT_WIFI &&
- tt_global_entry->flags & TT_CLIENT_WIFI)
+ if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
+ tt_global_entry->common.flags & TT_CLIENT_WIFI)
ret = true;

return ret;
@@ -831,6 +935,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
struct tt_local_entry *tt_local_entry = NULL;
struct tt_global_entry *tt_global_entry = NULL;
struct orig_node *orig_node = NULL;
+ struct neigh_node *router = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct tt_orig_list_entry *orig_entry;
+ int best_tq;

if (src && atomic_read(&bat_priv->ap_isolation)) {
tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -847,16 +956,30 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;

- if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
- goto out;
-
/* A global client marked as PENDING has already moved from that
* originator */
- if (tt_global_entry->flags & TT_CLIENT_PENDING)
+ if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
goto out;

- orig_node = tt_global_entry->orig_node;
+ best_tq = 0;

+ rcu_read_lock();
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+ router = orig_node_get_router(orig_entry->orig_node);
+ if (!router)
+ continue;
+
+ if (router->tq_avg > best_tq) {
+ orig_node = orig_entry->orig_node;
+ best_tq = router->tq_avg;
+ }
+ neigh_node_free_ref(router);
+ }
+ /* found anything? */
+ if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
+ orig_node = NULL;
+ rcu_read_unlock();
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
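
/*
 * Since several originators may now announce the same client,
 * transtable_search() has to pick one of them: the loop above keeps
 * the candidate whose router (next hop) reports the best TQ value
 * (transmit quality, 0-255) and only then tries to take a reference,
 * discarding the result if the refcount already dropped to zero. The
 * selection core in isolation (sketch; the caller must hold
 * rcu_read_lock()):
 */
static struct orig_node *pick_best_orig(struct tt_global_entry *entry)
{
	struct tt_orig_list_entry *orig_entry;
	struct orig_node *best = NULL;
	struct neigh_node *router;
	struct hlist_node *node;
	int best_tq = 0;

	hlist_for_each_entry_rcu(orig_entry, node, &entry->orig_list, list) {
		router = orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			best = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		neigh_node_free_ref(router);
	}

	return best;
}
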
@@ -871,31 +994,40 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
uint16_t total = 0, total_one;
struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hlist_node *node;
struct hlist_head *head;
- int i, j;
+ uint32_t i;
+ int j;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

rcu_read_lock();
- hlist_for_each_entry_rcu(tt_global_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- if (compare_eth(tt_global_entry->orig_node,
- orig_node)) {
- /* Roaming clients are in the global table for
- * consistency only. They don't have to be
- * taken into account while computing the
- * global crc */
- if (tt_global_entry->flags & TT_CLIENT_ROAM)
- continue;
- total_one = 0;
- for (j = 0; j < ETH_ALEN; j++)
- total_one = crc16_byte(total_one,
- tt_global_entry->addr[j]);
- total ^= total_one;
- }
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
+ /* Roaming clients are in the global table for
+ * consistency only. They don't have to be
+ * taken into account while computing the
+ * global crc */
+ if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+ continue;
+
+ /* find out if this global entry is announced by this
+ * originator */
+ if (!tt_global_entry_has_orig(tt_global_entry,
+ orig_node))
+ continue;
+
+ total_one = 0;
+ for (j = 0; j < ETH_ALEN; j++)
+ total_one = crc16_byte(total_one,
+ tt_global_entry->common.addr[j]);
+ total ^= total_one;
}
rcu_read_unlock();
}
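
/*
 * tt_global_crc() above and tt_local_crc() below both fold one CRC16
 * per client into the running total with XOR, so the checksum is
 * independent of hash order and two nodes can compare tables without
 * exchanging them. The per-client step in isolation (sketch;
 * crc16_byte() comes from lib/crc16):
 */
static uint16_t tt_crc_xor_client(uint16_t total, const uint8_t *addr)
{
	uint16_t total_one = 0;
	int j;

	for (j = 0; j < ETH_ALEN; j++)
		total_one = crc16_byte(total_one, addr[j]);

	return total ^ total_one;
}
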
@@ -908,25 +1040,26 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
uint16_t total = 0, total_one;
struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct hlist_node *node;
struct hlist_head *head;
- int i, j;
+ uint32_t i;
+ int j;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
/* not yet committed clients have not to be taken into
* account while computing the CRC */
- if (tt_local_entry->flags & TT_CLIENT_NEW)
+ if (tt_common_entry->flags & TT_CLIENT_NEW)
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_local_entry->addr[j]);
+ tt_common_entry->addr[j]);
total ^= total_one;
}
rcu_read_unlock();
@@ -975,8 +1108,7 @@ static void tt_req_purge(struct bat_priv *bat_priv)

spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
- if (is_out_of_time(node->issued_at,
- TT_REQUEST_TIMEOUT * 1000)) {
+ if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
list_del(&node->list);
kfree(node);
}
@@ -994,8 +1126,8 @@ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
if (compare_eth(tt_req_node_tmp, orig_node) &&
- !is_out_of_time(tt_req_node_tmp->issued_at,
- TT_REQUEST_TIMEOUT * 1000))
+ !has_timed_out(tt_req_node_tmp->issued_at,
+ TT_REQUEST_TIMEOUT))
goto unlock;
}

@@ -1015,22 +1147,26 @@ unlock:
/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
- const struct tt_local_entry *tt_local_entry = entry_ptr;
+ const struct tt_common_entry *tt_common_entry = entry_ptr;

- if (tt_local_entry->flags & TT_CLIENT_NEW)
+ if (tt_common_entry->flags & TT_CLIENT_NEW)
return 0;
return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
- const struct tt_global_entry *tt_global_entry = entry_ptr;
+ const struct tt_common_entry *tt_common_entry = entry_ptr;
+ const struct tt_global_entry *tt_global_entry;
const struct orig_node *orig_node = data_ptr;

- if (tt_global_entry->flags & TT_CLIENT_ROAM)
+ if (tt_common_entry->flags & TT_CLIENT_ROAM)
return 0;

- return (tt_global_entry->orig_node == orig_node);
+ tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+ common);
+
+ return tt_global_entry_has_orig(tt_global_entry, orig_node);
}

static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1040,7 +1176,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
const void *),
void *cb_data)
{
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct tt_query_packet *tt_response;
struct tt_change *tt_change;
struct hlist_node *node;
@@ -1048,7 +1184,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
struct sk_buff *skb = NULL;
uint16_t tt_tot, tt_count;
ssize_t tt_query_size = sizeof(struct tt_query_packet);
- int i;
+ uint32_t i;

if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
tt_len = primary_if->soft_iface->mtu - tt_query_size;
@@ -1072,15 +1208,16 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
if (tt_count == tt_tot)
break;

- if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+ if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
continue;

- memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+ memcpy(tt_change->addr, tt_common_entry->addr,
+ ETH_ALEN);
tt_change->flags = NO_FLAGS;

tt_count++;
@@ -1127,11 +1264,11 @@ static int send_tt_request(struct bat_priv *bat_priv,
tt_request = (struct tt_query_packet *)skb_put(skb,
sizeof(struct tt_query_packet));

- tt_request->packet_type = BAT_TT_QUERY;
- tt_request->version = COMPAT_VERSION;
+ tt_request->header.packet_type = BAT_TT_QUERY;
+ tt_request->header.version = COMPAT_VERSION;
memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
- tt_request->ttl = TTL;
+ tt_request->header.ttl = TTL;
tt_request->ttvn = ttvn;
tt_request->tt_data = tt_crc;
tt_request->flags = TT_REQUEST;
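
/*
 * From here on every packet_type/version/ttl access gains a "header."
 * level: the three fields shared by all batman packets were evidently
 * pulled into a common structure in packet.h, presumably shaped like
 * the sketch below and embedded as the first member of each packet
 * type (the second struct here is illustrative, not from this diff):
 */
struct batman_header {
	uint8_t packet_type;
	uint8_t version;	/* batman protocol version */
	uint8_t ttl;
} __packed;

struct example_packet {
	struct batman_header header;	/* always first */
	uint8_t flags;			/* type-specific fields follow */
} __packed;
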
@@ -1187,11 +1324,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

/* Let's get the orig node of the REAL destination */
- req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+ req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
if (!req_dst_orig_node)
goto out;

- res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+ res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
if (!res_dst_orig_node)
goto out;

@@ -1257,9 +1394,9 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
tt_response = (struct tt_query_packet *)skb->data;
}

- tt_response->packet_type = BAT_TT_QUERY;
- tt_response->version = COMPAT_VERSION;
- tt_response->ttl = TTL;
+ tt_response->header.packet_type = BAT_TT_QUERY;
+ tt_response->header.version = COMPAT_VERSION;
+ tt_response->header.ttl = TTL;
memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
tt_response->flags = TT_RESPONSE;
@@ -1317,7 +1454,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
req_ttvn = tt_request->ttvn;

- orig_node = get_orig_node(bat_priv, tt_request->src);
+ orig_node = orig_hash_find(bat_priv, tt_request->src);
if (!orig_node)
goto out;

@@ -1374,9 +1511,9 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
tt_response = (struct tt_query_packet *)skb->data;
}

- tt_response->packet_type = BAT_TT_QUERY;
- tt_response->version = COMPAT_VERSION;
- tt_response->ttl = TTL;
+ tt_response->header.packet_type = BAT_TT_QUERY;
+ tt_response->header.version = COMPAT_VERSION;
+ tt_response->header.ttl = TTL;
memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
tt_response->flags = TT_RESPONSE;
@@ -1411,9 +1548,13 @@ out:
bool send_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_request)
{
- if (is_my_mac(tt_request->dst))
+ if (is_my_mac(tt_request->dst)) {
+ /* don't answer backbone gws! */
+ if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+ return true;
+
return send_my_tt_response(bat_priv, tt_request);
- else
+ } else
return send_other_tt_response(bat_priv, tt_request);
}
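
/*
 * bla_is_backbone_gw_orig() belongs to the new bridge loop avoidance
 * code added elsewhere in this patch (bridge_loop_avoidance.c): nodes
 * that act as backbone gateways on the same LAN see each other's
 * clients over the wire, so their translation tables must not be
 * queried or answered through the mesh. The same guard reappears below
 * in handle_tt_response() and tt_update_orig(); a sketch of the check
 * as these callers use it (wrapper name invented for illustration):
 */
static bool tt_query_from_backbone_gw(struct bat_priv *bat_priv,
				      struct tt_query_packet *tt_query)
{
	/* a backbone gw's TT state is handled by BLA, not by TT */
	return bla_is_backbone_gw_orig(bat_priv, tt_query->src);
}
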

@@ -1485,6 +1626,7 @@ static void tt_update_changes(struct bat_priv *bat_priv,
tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
tt_num_changes);
atomic_set(&orig_node->last_ttvn, ttvn);
+ orig_node->tt_initialised = true;
}

bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
@@ -1497,7 +1639,7 @@ bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
goto out;
/* Check if the client has been logically deleted (but is kept for
* consistency purpose) */
- if (tt_local_entry->flags & TT_CLIENT_PENDING)
+ if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
goto out;
ret = true;
out:

@@ -1518,6 +1660,10 @@ void handle_tt_response(struct bat_priv *bat_priv,
tt_response->tt_data,
(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

+ /* we should have never asked a backbone gw */
+ if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+ goto out;
+
orig_node = orig_hash_find(bat_priv, tt_response->src);
if (!orig_node)
goto out;
@@ -1582,8 +1728,7 @@ static void tt_roam_purge(struct bat_priv *bat_priv)

spin_lock_bh(&bat_priv->tt_roam_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
- if (!is_out_of_time(node->first_time,
- ROAMING_MAX_TIME * 1000))
+ if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
continue;

list_del(&node->list);
@@ -1610,8 +1755,7 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
if (!compare_eth(tt_roam_node->addr, client))
continue;

- if (is_out_of_time(tt_roam_node->first_time,
- ROAMING_MAX_TIME * 1000))
+ if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
continue;

if (!atomic_dec_not_zero(&tt_roam_node->counter))
@@ -1662,9 +1806,9 @@ void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
sizeof(struct roam_adv_packet));

- roam_adv_packet->packet_type = BAT_ROAM_ADV;
- roam_adv_packet->version = COMPAT_VERSION;
- roam_adv_packet->ttl = TTL;
+ roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
+ roam_adv_packet->header.version = COMPAT_VERSION;
+ roam_adv_packet->header.ttl = TTL;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1720,45 +1864,53 @@ void tt_free(struct bat_priv *bat_priv)
kfree(bat_priv->tt_buff);
}

-/* This function will reset the specified flags from all the entries in
- * the given hash table and will increment num_local_tt for each involved
- * entry */
-static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
+/* This function will enable or disable the specified flags for all the entries
+ * in the given hash table and returns the number of modified entries */
+static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
+ bool enable)
{
- int i;
- struct hashtable_t *hash = bat_priv->tt_local_hash;
+ uint32_t i;
+ uint16_t changed_num = 0;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;

if (!hash)
- return;
+ goto out;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- if (!(tt_local_entry->flags & flags))
- continue;
- tt_local_entry->flags &= ~flags;
- atomic_inc(&bat_priv->num_local_tt);
+ if (enable) {
+ if ((tt_common_entry->flags & flags) == flags)
+ continue;
+ tt_common_entry->flags |= flags;
+ } else {
+ if (!(tt_common_entry->flags & flags))
+ continue;
+ tt_common_entry->flags &= ~flags;
+ }
+ changed_num++;
}
rcu_read_unlock();
}
-
+out:
+ return changed_num;
|
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->tt_local_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_local_entry *tt_local_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;

if (!hash)
return;
@@ -1768,16 +1920,19 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
- if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
+ if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
continue;

bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
- "(%pM): pending\n", tt_local_entry->addr);
+ "(%pM): pending\n", tt_common_entry->addr);

atomic_dec(&bat_priv->num_local_tt);
hlist_del_rcu(node);
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
tt_local_entry_free_ref(tt_local_entry);
}
spin_unlock_bh(list_lock);
@@ -1787,7 +1942,11 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)

void tt_commit_changes(struct bat_priv *bat_priv)
{
- tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
+ uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
+ TT_CLIENT_NEW, false);
+ /* all the reset entries have now to be effectively counted as local
+ * entries */
+ atomic_add(changed_num, &bat_priv->num_local_tt);
tt_local_purge_pending_clients(bat_priv);

/* Increment the TTVN only once per OGM interval */
@@ -1832,8 +1991,14 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
bool full_table = true;

- /* the ttvn increased by one -> we can apply the attached changes */
- if (ttvn - orig_ttvn == 1) {
+ /* don't care about a backbone gateway's updates. */
+ if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+ return;
+
+ /* orig table not initialised AND first diff is in the OGM OR the ttvn
+ * increased by one -> we can apply the attached changes */
+ if ((!orig_node->tt_initialised && ttvn == 1) ||
+ ttvn - orig_ttvn == 1) {
/* the OGM could not contain the changes due to their size or
* because they have already been sent TT_OGM_APPEND_MAX times.
* In this case send a tt request */
@@ -1867,7 +2032,9 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
} else {
/* if we missed more than one change or our tables are not
* in sync anymore -> request fresh tt data */
- if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+ if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
+ orig_node->tt_crc != tt_crc) {
request_table:
bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
"Need to retrieve the correct information "
diff --git a/types.h b/types.h
index ab8d0fe..b60ccdb 100644
--- a/types.h
+++ b/types.h
@@ -81,6 +81,7 @@ struct orig_node {
int16_t tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff */
atomic_t tt_size;
+ bool tt_initialised;
/* The tt_poss_change flag is used to detect an ongoing roaming phase.
* If true, then I sent a Roaming_adv to this orig_node and I have to
* inspect every packet directed to it to check whether it is still
@@ -139,6 +140,11 @@ struct neigh_node {
spinlock_t tq_lock; /* protects: tq_recv, tq_index */
};

+struct bcast_duplist_entry {
+ uint8_t orig[ETH_ALEN];
+ uint16_t crc;
+ unsigned long entrytime;
+};

struct bat_priv {
atomic_t mesh_state;
@@ -147,6 +153,7 @@ struct bat_priv {
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
atomic_t ap_isolation; /* boolean */
+ atomic_t bridge_loop_avoidance; /* boolean */
atomic_t vis_mode; /* VIS_TYPE_* */
atomic_t gw_mode; /* GW_MODE_* */
atomic_t gw_sel_class; /* uint */
@@ -160,6 +167,7 @@ struct bat_priv {
atomic_t ttvn; /* translation table version number */
atomic_t tt_ogm_append_cnt;
atomic_t tt_local_changes; /* changes registered in a OGM interval */
+ atomic_t bla_num_requests; /* number of bla requests in flight */
/* The tt_poss_change flag is used to detect an ongoing roaming phase.
* If true, then I received a Roaming_adv and I have to inspect every
* packet directed to me to check whether I am still the true
@@ -173,15 +181,19 @@ struct bat_priv {
struct hlist_head forw_bat_list;
struct hlist_head forw_bcast_list;
struct hlist_head gw_list;
- struct hlist_head softif_neigh_vids;
struct list_head tt_changes_list; /* tracks changes in a OGM int */
struct list_head vis_send_list;
struct hashtable_t *orig_hash;
struct hashtable_t *tt_local_hash;
struct hashtable_t *tt_global_hash;
+ struct hashtable_t *claim_hash;
+ struct hashtable_t *backbone_hash;
struct list_head tt_req_list; /* list of pending tt_requests */
struct list_head tt_roam_list;
struct hashtable_t *vis_hash;
+ struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+ int bcast_duplist_curr;
+ struct bla_claim_dst claim_dest;
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
spinlock_t forw_bcast_list_lock; /* protects */
spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -190,8 +202,6 @@ struct bat_priv {
spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
spinlock_t vis_hash_lock; /* protects vis_hash */
spinlock_t vis_list_lock; /* protects vis_info::recv_list */
- spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
- spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
atomic_t num_local_tt;
/* Checksum of the local table, recomputed before sending a new OGM */
atomic_t tt_crc;
@@ -201,10 +211,12 @@ struct bat_priv {
struct delayed_work tt_work;
struct delayed_work orig_work;
struct delayed_work vis_work;
+ struct delayed_work bla_work;
struct gw_node __rcu *curr_gw; /* rcu protected pointer */
atomic_t gw_reselect;
struct hard_iface __rcu *primary_if; /* rcu protected pointer */
struct vis_info *my_vis_info;
+ struct bat_algo_ops *bat_algo_ops;
};

struct socket_client {
@@ -222,24 +234,53 @@ struct socket_packet {
struct icmp_packet_rr icmp_packet;
};

-struct tt_local_entry {
+struct tt_common_entry {
uint8_t addr[ETH_ALEN];
struct hlist_node hash_entry;
- unsigned long last_seen;
uint16_t flags;
atomic_t refcount;
struct rcu_head rcu;
};

+struct tt_local_entry {
+ struct tt_common_entry common;
+ unsigned long last_seen;
+};
+
struct tt_global_entry {
- uint8_t addr[ETH_ALEN];
- struct hlist_node hash_entry; /* entry in the global table */
+ struct tt_common_entry common;
+ struct hlist_head orig_list;
+ spinlock_t list_lock; /* protects the list */
+ unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+};
+
+struct tt_orig_list_entry {
struct orig_node *orig_node;
uint8_t ttvn;
- uint16_t flags; /* only TT_GLOBAL_ROAM is used */
- unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+ struct rcu_head rcu;
+ struct hlist_node list;
+};
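
/*
 * tt_common_entry now carries everything the generic hash code needs
 * (address, hash linkage, flags, refcount, rcu) and is deliberately the
 * first member of both tt_local_entry and tt_global_entry, so a
 * tt_common_entry pointer can always be turned back into the outer
 * type. The lookup pattern used throughout translation-table.c, in a
 * sketch built on tt_hash_find() from this patch:
 */
static struct tt_local_entry *sketch_local_find(struct hashtable_t *hash,
						const uint8_t *addr)
{
	struct tt_common_entry *common;

	common = tt_hash_find(hash, addr);
	if (!common)
		return NULL;

	return container_of(common, struct tt_local_entry, common);
}
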
+
+struct backbone_gw {
+ uint8_t orig[ETH_ALEN];
+ short vid; /* used VLAN ID */
+ struct hlist_node hash_entry;
+ struct bat_priv *bat_priv;
+ unsigned long lasttime; /* last time we heard of this backbone gw */
+ atomic_t request_sent;
atomic_t refcount;
struct rcu_head rcu;
+ uint16_t crc; /* crc checksum over all claims */
+};
+
+struct claim {
+ uint8_t addr[ETH_ALEN];
+ short vid;
+ struct backbone_gw *backbone_gw;
+ unsigned long lasttime; /* last time we heard of claim (locals only) */
+ struct rcu_head rcu;
+ atomic_t refcount;
+ struct hlist_node hash_entry;
};

struct tt_change_node {
@@ -325,22 +366,23 @@ struct recvlist_node {
uint8_t mac[ETH_ALEN];
};

-struct softif_neigh_vid {
+struct bat_algo_ops {
struct hlist_node list;
- struct bat_priv *bat_priv;
- short vid;
- atomic_t refcount;
- struct softif_neigh __rcu *softif_neigh;
- struct rcu_head rcu;
- struct hlist_head softif_neigh_list;
-};
-
-struct softif_neigh {
- struct hlist_node list;
- uint8_t addr[ETH_ALEN];
- unsigned long last_seen;
- atomic_t refcount;
- struct rcu_head rcu;
+ char *name;
+ /* init OGM when hard-interface is enabled */
+ void (*bat_ogm_init)(struct hard_iface *hard_iface);
+ /* init primary OGM when primary interface is selected */
+ void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
+ /* init mac addresses of the OGM belonging to this hard-interface */
+ void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
+ /* prepare a new outgoing OGM for the send queue */
+ void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
+ int tt_num_changes);
+ /* send scheduled OGM */
+ void (*bat_ogm_emit)(struct forw_packet *forw_packet);
+ /* receive incoming OGM */
+ void (*bat_ogm_receive)(struct hard_iface *if_incoming,
+ struct sk_buff *skb);
};
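
/*
 * bat_algo_ops turns the previously hard-wired OGM handling into a
 * pluggable routing-algorithm interface. A sketch of how an algorithm
 * implementation might fill and register the table -- assuming a
 * bat_algo_register() hook accompanies it; all handler names below are
 * illustrative, not part of this diff:
 */
static void my_ogm_init(struct hard_iface *hard_iface) { /* ... */ }
static void my_ogm_init_primary(struct hard_iface *hard_iface) { /* ... */ }
static void my_ogm_update_mac(struct hard_iface *hard_iface) { /* ... */ }
static void my_ogm_schedule(struct hard_iface *hard_iface,
			    int tt_num_changes) { /* ... */ }
static void my_ogm_emit(struct forw_packet *forw_packet) { /* ... */ }
static void my_ogm_receive(struct hard_iface *if_incoming,
			   struct sk_buff *skb) { /* ... */ }

static struct bat_algo_ops my_algo __read_mostly = {
	.name			= "MY ALGO",
	.bat_ogm_init		= my_ogm_init,
	.bat_ogm_init_primary	= my_ogm_init_primary,
	.bat_ogm_update_mac	= my_ogm_update_mac,
	.bat_ogm_schedule	= my_ogm_schedule,
	.bat_ogm_emit		= my_ogm_emit,
	.bat_ogm_receive	= my_ogm_receive,
};
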

#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/unicast.c b/unicast.c
index 07d1c1d..6f3c659 100644
--- a/unicast.c
+++ b/unicast.c
@@ -67,7 +67,7 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,

memmove(skb->data + uni_diff, skb->data, hdr_len);
unicast_packet = (struct unicast_packet *) skb_pull(skb, uni_diff);
- unicast_packet->packet_type = BAT_UNICAST;
+ unicast_packet->header.packet_type = BAT_UNICAST;

return skb;

@@ -251,9 +251,9 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,

memcpy(frag1, &tmp_uc, sizeof(tmp_uc));

- frag1->ttl--;
- frag1->version = COMPAT_VERSION;
- frag1->packet_type = BAT_UNICAST_FRAG;
+ frag1->header.ttl--;
+ frag1->header.version = COMPAT_VERSION;
+ frag1->header.packet_type = BAT_UNICAST_FRAG;

memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(*frag2));
@@ -320,11 +320,11 @@ find_router:

unicast_packet = (struct unicast_packet *)skb->data;

- unicast_packet->version = COMPAT_VERSION;
+ unicast_packet->header.version = COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->packet_type = BAT_UNICAST;
+ unicast_packet->header.packet_type = BAT_UNICAST;
/* set unicast ttl */
- unicast_packet->ttl = TTL;
+ unicast_packet->header.ttl = TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -335,7 +335,7 @@ find_router:
data_len + sizeof(*unicast_packet) >
neigh_node->if_incoming->net_dev->mtu) {
/* send frag skb decreases ttl */
- unicast_packet->ttl++;
+ unicast_packet->header.ttl++;
ret = frag_send_skb(skb, bat_priv,
neigh_node->if_incoming, neigh_node->addr);
goto out;
diff --git a/vis.c b/vis.c
index f81a6b6..4f4b2a0 100644
--- a/vis.c
+++ b/vis.c
@@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2)

/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(const void *data, int size)
+static uint32_t vis_info_choose(const void *data, uint32_t size)
{
const struct vis_info *vis_info = data;
const struct vis_packet *packet;
@@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
struct hlist_head *head;
struct hlist_node *node;
struct vis_info *vis_info, *vis_info_tmp = NULL;
- int index;
+ uint32_t index;

if (!hash)
return NULL;
@@ -202,7 +202,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
HLIST_HEAD(vis_if_list);
struct if_list_entry *entry;
struct hlist_node *pos, *n;
- int i, j, ret = 0;
+ uint32_t i;
+ int j, ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
char *buff;
@@ -556,7 +557,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
struct hlist_head *head;
struct orig_node *orig_node;
struct vis_packet *packet;
- int best_tq = -1, i;
+ int best_tq = -1;
+ uint32_t i;

packet = (struct vis_packet *)info->skb_packet->data;

@@ -607,14 +609,15 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
struct vis_info *info = bat_priv->my_vis_info;
struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
struct vis_info_entry *entry;
- struct tt_local_entry *tt_local_entry;
- int best_tq = -1, i;
+ struct tt_common_entry *tt_common_entry;
+ int best_tq = -1;
+ uint32_t i;

info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);

memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
- packet->ttl = TTL;
+ packet->header.ttl = TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
packet->entries = 0;
skb_trim(info->skb_packet, sizeof(*packet));
@@ -669,13 +672,13 @@ next:
head = &hash->table[i];

rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node, head,
+ hlist_for_each_entry_rcu(tt_common_entry, node, head,
hash_entry) {
entry = (struct vis_info_entry *)
skb_put(info->skb_packet,
sizeof(*entry));
memset(entry->src, 0, ETH_ALEN);
- memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+ memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
entry->quality = 0; /* 0 means TT */
packet->entries++;

@@ -696,7 +699,7 @@ unlock:
* held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
- int i;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
@@ -711,8 +714,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
if (info == bat_priv->my_vis_info)
continue;

- if (time_after(jiffies,
- info->first_seen + VIS_TIMEOUT * HZ)) {
+ if (has_timed_out(info->first_seen, VIS_TIMEOUT)) {
hlist_del(node);
send_list_del(info);
kref_put(&info->refcount, free_info);
@@ -733,7 +735,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
struct sk_buff *skb;
struct hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
- int i;
+ uint32_t i;


packet = (struct vis_packet *)info->skb_packet->data;

@@ -815,19 +817,19 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
goto out;

packet = (struct vis_packet *)info->skb_packet->data;
- if (packet->ttl < 2) {
+ if (packet->header.ttl < 2) {
pr_debug("Error - can't send vis packet: ttl exceeded\n");
goto out;
}

memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- packet->ttl--;
+ packet->header.ttl--;

if (is_broadcast_ether_addr(packet->target_orig))
broadcast_vis_packet(bat_priv, info);
else
unicast_vis_packet(bat_priv, info);
- packet->ttl++; /* restore TTL */
+ packet->header.ttl++; /* restore TTL */

out:
if (primary_if)
hardif_free_ref(primary_if);
@@ -907,9 +909,9 @@ int vis_init(struct bat_priv *bat_priv)
INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
kref_init(&bat_priv->my_vis_info->refcount);
bat_priv->my_vis_info->bat_priv = bat_priv;
- packet->version = COMPAT_VERSION;
- packet->packet_type = BAT_VIS;
- packet->ttl = TTL;
+ packet->header.version = COMPAT_VERSION;
+ packet->header.packet_type = BAT_VIS;
+ packet->header.ttl = TTL;
packet->seqno = 0;
packet->entries = 0;
diff --git a/vis.h b/vis.h
index 31b820d..851bc4f 100644
--- a/vis.h
+++ b/vis.h
@@ -22,7 +22,8 @@
#ifndef _NET_BATMAN_ADV_VIS_H_
#define _NET_BATMAN_ADV_VIS_H_

-#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */
+#define VIS_TIMEOUT 200000 /* timeout of vis packets
+ * in milliseconds */

int vis_seq_print_text(struct seq_file *seq, void *offset);
void receive_server_sync_packet(struct bat_priv *bat_priv,