struct batman_if *if_incoming,
int own_packet)
{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct forw_packet *forw_packet_aggr;
unsigned long flags;
unsigned char *skb_buff;
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
/* own packet should always be scheduled */
if (!own_packet) {
forw_packet_aggr->direct_link_flags |= 1;
/* add new packet to packet list */
- spin_lock_irqsave(&forw_bat_list_lock, flags);
- hlist_add_head(&forw_packet_aggr->list, &forw_bat_list);
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
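+ /* the forward queue is per mesh and protected by its own lock */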
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
unsigned long flags;
/* find position for the packet in the forward queue */
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
/* own packets are not to be aggregated */
if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
- hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
- list) {
+ hlist_for_each_entry(forw_packet_pos, tmp_node,
+ &bat_priv->forw_bat_list, list) {
if (can_aggregate_with(batman_packet,
packet_len,
send_time,
* suitable aggregation packet found */
if (forw_packet_aggr == NULL) {
/* the following section can run without the lock */
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/**
* if we could not aggregate this packet with one of the others
aggregate(forw_packet_aggr,
packet_buff, packet_len,
direct_link);
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
}
}
return sprintf(buff, "%s\n",
batman_if->if_status == IF_NOT_IN_USE ?
- "none" : "bat0");
+ "none" : batman_if->soft_iface->name);
}
static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
return 1;
}
-static struct batman_if *get_active_batman_if(void)
+static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
struct batman_if *batman_if;
- /* TODO: should check interfaces belonging to bat_priv */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
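+ /* only interfaces attached to this mesh are candidates */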
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
if (batman_if->if_status == IF_ACTIVE)
goto out;
}
struct batman_if *batman_if)
{
struct batman_packet *batman_packet;
+ struct vis_packet *vis_packet;
bat_priv->primary_if = batman_if;
if (!bat_priv->primary_if)
return;
- set_main_if_addr(batman_if->net_dev->dev_addr);
-
batman_packet = (struct batman_packet *)(batman_if->packet_buff);
batman_packet->flags = PRIMARIES_FIRST_HOP;
batman_packet->ttl = TTL;
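+ /* stamp our vis packet with the address of the new primary interface */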
+ vis_packet = (struct vis_packet *)
+ bat_priv->my_vis_info->skb_packet->data;
+ memcpy(vis_packet->vis_orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(vis_packet->sender_orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+
/***
* hacky trick to make sure that we send the HNA information via
* our new primary interface
*/
- atomic_set(&hna_local_changed, 1);
+ atomic_set(&bat_priv->hna_local_changed, 1);
}
static bool hardif_is_iface_up(struct batman_if *batman_if)
bat_info(batman_if->soft_iface, "Interface activated: %s\n",
batman_if->dev);
- if (atomic_read(&module_state) == MODULE_INACTIVE)
- activate_module();
-
update_min_mtu(batman_if->soft_iface);
return;
}
orig_hash_del_if(batman_if, bat_priv->num_ifaces);
if (batman_if == bat_priv->primary_if)
- set_primary_if(bat_priv, get_active_batman_if());
+ set_primary_if(bat_priv,
+ get_active_batman_if(batman_if->soft_iface));
kfree(batman_if->packet_buff);
batman_if->packet_buff = NULL;
batman_if->if_status = IF_NOT_IN_USE;
+
+ /* delete all references to this batman_if */
+ purge_orig_ref(bat_priv);
+ purge_outstanding_packets(bat_priv, batman_if);
dev_put(batman_if->soft_iface);
/* nobody uses this interface anymore */
softif_destroy(batman_if->soft_iface);
batman_if->soft_iface = NULL;
-
- /*if ((atomic_read(&module_state) == MODULE_ACTIVE) &&
- (bat_priv->num_ifaces == 0))
- deactivate_module();*/
}
static struct batman_if *hardif_add_interface(struct net_device *net_dev)
{
struct batman_if *batman_if = container_of(rcu, struct batman_if, rcu);
- /* delete all references to this batman_if */
- purge_orig(NULL);
- purge_outstanding_packets(batman_if);
-
kfree(batman_if->dev);
kfree(batman_if);
}
if (!skb)
goto err_out;
- if (atomic_read(&module_state) != MODULE_ACTIVE)
- goto err_free;
-
/* packet should hold at least type and version */
if (unlikely(!pskb_may_pull(skb, 2)))
goto err_free;
|| !skb_mac_header(skb)))
goto err_free;
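+ /* packets on a hard interface without a mesh attached are dropped */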
+ if (!batman_if->soft_iface)
+ goto err_free;
+
+ bat_priv = netdev_priv(batman_if->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ goto err_free;
+
/* discard frames on not active interfaces */
if (batman_if->if_status != IF_ACTIVE)
goto err_free;
batman_packet = (struct batman_packet *)skb->data;
- bat_priv = netdev_priv(batman_if->soft_iface);
if (batman_packet->version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
goto free_skb;
}
- if (atomic_read(&module_state) != MODULE_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dst_unreach;
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_node = (struct orig_node *)hash_find(orig_hash, icmp_packet->dst);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
+ icmp_packet->dst));
if (!orig_node)
goto unlock;
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if (!batman_if)
goto dst_unreach;
goto out;
unlock:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
dst_unreach:
icmp_packet->msg_type = DESTINATION_UNREACHABLE;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
#include "hash.h"
struct list_head if_list;
-struct hlist_head forw_bat_list;
-struct hlist_head forw_bcast_list;
-struct hashtable_t *orig_hash;
-
-DEFINE_SPINLOCK(orig_hash_lock);
-DEFINE_SPINLOCK(forw_bat_list_lock);
-DEFINE_SPINLOCK(forw_bcast_list_lock);
-
-int16_t num_hna;
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-atomic_t module_state;
struct workqueue_struct *bat_event_workqueue;
static int __init batman_init(void)
{
INIT_LIST_HEAD(&if_list);
- INIT_HLIST_HEAD(&forw_bat_list);
- INIT_HLIST_HEAD(&forw_bcast_list);
-
- atomic_set(&module_state, MODULE_INACTIVE);
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
static void __exit batman_exit(void)
{
- deactivate_module();
-
debugfs_destroy();
unregister_netdevice_notifier(&hard_if_notifier);
hardif_remove_interfaces();
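+ /* make sure no events are still pending before the workqueue goes away */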
+ flush_workqueue(bat_event_workqueue);
destroy_workqueue(bat_event_workqueue);
bat_event_workqueue = NULL;
}
-/* activates the module, starts timer ... */
-void activate_module(void)
+int mesh_init(struct net_device *soft_iface)
{
- if (originator_init() < 1)
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
+
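+ /* initialize the per-mesh locks and lists that replace the old globals */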
+ spin_lock_init(&bat_priv->orig_hash_lock);
+ spin_lock_init(&bat_priv->forw_bat_list_lock);
+ spin_lock_init(&bat_priv->forw_bcast_list_lock);
+ spin_lock_init(&bat_priv->hna_lhash_lock);
+ spin_lock_init(&bat_priv->hna_ghash_lock);
+ spin_lock_init(&bat_priv->gw_list_lock);
+ spin_lock_init(&bat_priv->vis_hash_lock);
+ spin_lock_init(&bat_priv->vis_list_lock);
+
+ INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
+ INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
+ INIT_HLIST_HEAD(&bat_priv->gw_list);
+
+ if (originator_init(bat_priv) < 1)
goto err;
- if (hna_local_init() < 1)
+ if (hna_local_init(bat_priv) < 1)
goto err;
- if (hna_global_init() < 1)
+ if (hna_global_init(bat_priv) < 1)
goto err;
- /*hna_local_add(soft_device->dev_addr);*/
+ hna_local_add(soft_iface, soft_iface->dev_addr);
- if (vis_init() < 1)
+ if (vis_init(bat_priv) < 1)
goto err;
- /*update_min_mtu();*/
- atomic_set(&module_state, MODULE_ACTIVE);
+ atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
goto end;
err:
pr_err("Unable to allocate memory for mesh information structures: "
"out of mem ?\n");
- deactivate_module();
+ mesh_free(soft_iface);
+ return -1;
+
end:
- return;
+ return 0;
}
-/* shuts down the whole module.*/
-void deactivate_module(void)
+void mesh_free(struct net_device *soft_iface)
{
- atomic_set(&module_state, MODULE_DEACTIVATING);
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
- purge_outstanding_packets(NULL);
- flush_workqueue(bat_event_workqueue);
+ atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
+
+ purge_outstanding_packets(bat_priv, NULL);
- vis_quit();
+ vis_quit(bat_priv);
- originator_free();
+ originator_free(bat_priv);
- hna_local_free();
- hna_global_free();
+ hna_local_free(bat_priv);
+ hna_global_free(bat_priv);
synchronize_net();
synchronize_rcu();
- atomic_set(&module_state, MODULE_INACTIVE);
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
void inc_module_count(void)
#define EXPECTED_SEQNO_RANGE 65536
/* don't reset again within 30 seconds */
-#define MODULE_INACTIVE 0
-#define MODULE_ACTIVE 1
-#define MODULE_DEACTIVATING 2
+#define MESH_INACTIVE 0
+#define MESH_ACTIVE 1
+#define MESH_DEACTIVATING 2
#define BCAST_QUEUE_LEN 256
#define BATMAN_QUEUE_LEN 256
#endif
extern struct list_head if_list;
-extern struct hlist_head forw_bat_list;
-extern struct hlist_head forw_bcast_list;
-extern struct hashtable_t *orig_hash;
-
-extern spinlock_t orig_hash_lock;
-extern spinlock_t forw_bat_list_lock;
-extern spinlock_t forw_bcast_list_lock;
-
-extern int16_t num_hna;
extern unsigned char broadcast_addr[];
-extern atomic_t module_state;
extern struct workqueue_struct *bat_event_workqueue;
-void activate_module(void);
-void deactivate_module(void);
+int mesh_init(struct net_device *soft_iface);
+void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
int addr_to_string(char *buff, uint8_t *addr);
int is_mcast(uint8_t *addr);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-extern int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
+int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
#define bat_dbg(type, bat_priv, fmt, arg...) \
do { \
#include "hard-interface.h"
#include "unicast.h"
-static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig);
+static void purge_orig(struct work_struct *work);
-static void start_purge_timer(void)
+static void start_purge_timer(struct bat_priv *bat_priv)
{
- queue_delayed_work(bat_event_workqueue, &purge_orig_wq, 1 * HZ);
+ INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}
-int originator_init(void)
+int originator_init(struct bat_priv *bat_priv)
{
unsigned long flags;
- if (orig_hash)
+ if (bat_priv->orig_hash)
return 1;
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_hash = hash_new(128, compare_orig, choose_orig);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
- if (!orig_hash)
+ if (!bat_priv->orig_hash)
goto err;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- start_purge_timer();
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ start_purge_timer(bat_priv);
return 1;
err:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
}
kfree(orig_node);
}
-void originator_free(void)
+void originator_free(struct bat_priv *bat_priv)
{
unsigned long flags;
- if (!orig_hash)
+ if (!bat_priv->orig_hash)
return;
- cancel_delayed_work_sync(&purge_orig_wq);
+ cancel_delayed_work_sync(&bat_priv->orig_work);
- spin_lock_irqsave(&orig_hash_lock, flags);
- /*hash_delete(orig_hash, free_orig_node, bat_priv);*/
- orig_hash = NULL;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
+ bat_priv->orig_hash = NULL;
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
/* this function finds or creates an originator entry for the given
struct hashtable_t *swaphash;
int size;
- orig_node = ((struct orig_node *)hash_find(orig_hash, addr));
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
- if (orig_node != NULL)
+ if (orig_node)
return orig_node;
bat_dbg(DBG_BATMAN, bat_priv,
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
- if (hash_add(orig_hash, orig_node) < 0)
+ if (hash_add(bat_priv->orig_hash, orig_node) < 0)
goto free_bcast_own_sum;
- if (orig_hash->elements * 4 > orig_hash->size) {
- swaphash = hash_resize(orig_hash, orig_hash->size * 2);
+ if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
+ swaphash = hash_resize(bat_priv->orig_hash,
+ bat_priv->orig_hash->size * 2);
- if (swaphash == NULL)
+ if (!swaphash)
bat_dbg(DBG_BATMAN, bat_priv,
"Couldn't resize orig hash table\n");
else
- orig_hash = swaphash;
+ bat_priv->orig_hash = swaphash;
}
return orig_node;
if ((time_after(jiffies,
neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
- (neigh_node->if_incoming->if_status ==
- IF_TO_BE_REMOVED)) {
+ (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
+ (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
if (neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)
return false;
}
-void purge_orig(struct work_struct *work)
+static void _purge_orig(struct bat_priv *bat_priv)
{
HASHIT(hashit);
struct orig_node *orig_node;
unsigned long flags;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
/* for all origins... */
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
- /*if (purge_orig_node(bat_priv, orig_node)) {
- hash_remove_bucket(orig_hash, &hashit);
- free_orig_node(orig_node);
- }*/
+ if (purge_orig_node(bat_priv, orig_node)) {
+ hash_remove_bucket(bat_priv->orig_hash, &hashit);
+ free_orig_node(orig_node, bat_priv);
+ }
if (time_after(jiffies, (orig_node->last_frag_packet +
msecs_to_jiffies(FRAG_TIMEOUT))))
frag_list_free(&orig_node->frag_list);
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+
+}
+
+static void purge_orig(struct work_struct *work)
+{
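+ /* orig_work is embedded in struct bat_priv, so container_of() recovers it */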
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, orig_work);
+
+ _purge_orig(bat_priv);
+ start_purge_timer(bat_priv);
+}
- /* if work == NULL we were not called by the timer
- * and thus do not need to re-arm the timer */
- if (work)
- start_purge_timer();
+void purge_orig_ref(struct bat_priv *bat_priv)
+{
+ _purge_orig(bat_priv);
}
int orig_seq_print_text(struct seq_file *seq, void *offset)
"outgoingIF", "Potential nexthops");
rcu_read_unlock();
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
batman_count++;
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if ((batman_count == 0))
seq_printf(seq, "No batman nodes in range ...\n");
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct orig_node *orig_node;
unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
if (orig_node_add_if(orig_node, max_if_num) == -1)
goto err;
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
err:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return -ENOMEM;
}
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
unsigned long flags;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
ret = orig_node_del_if(orig_node, max_if_num,
if (batman_if == batman_if_tmp)
continue;
+ if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+ continue;
+
if (batman_if_tmp->if_num > batman_if->if_num)
batman_if_tmp->if_num--;
}
rcu_read_unlock();
batman_if->if_num = -1;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
err:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return -ENOMEM;
}
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
#define _NET_BATMAN_ADV_ORIGINATOR_H_
-int originator_init(void);
-void originator_free(void);
-void purge_orig(struct work_struct *work);
+int originator_init(struct bat_priv *bat_priv);
+void originator_free(struct bat_priv *bat_priv);
+void purge_orig_ref(struct bat_priv *bat_priv);
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
#include "aggregation.h"
#include "unicast.h"
-static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
-
void slide_own_bcast_window(struct batman_if *batman_if)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
TYPE_OF_WORD *word;
unsigned long flags;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]);
bit_packet_count(word);
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
if (batman_if->if_status != IF_ACTIVE)
continue;
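+ /* only interfaces of the receiving mesh count for the own-address checks */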
+ if (batman_if->soft_iface != if_incoming->soft_iface)
+ continue;
+
if (compare_orig(ethhdr->h_source,
batman_if->net_dev->dev_addr))
is_my_addr = 1;
0, hna_buff_len, if_incoming);
}
-int recv_bat_packet(struct sk_buff *skb,
- struct batman_if *batman_if)
+int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct ethhdr *ethhdr;
unsigned long flags;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
receive_aggr_bat_packet(ethhdr,
skb->data,
skb_headlen(skb),
batman_if);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
-static int recv_my_icmp_packet(struct sk_buff *skb,
- struct batman_if *recv_if, size_t icmp_len)
+static int recv_my_icmp_packet(struct bat_priv *bat_priv,
+ struct sk_buff *skb, size_t icmp_len)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
/* answer echo request (ping) */
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_node = ((struct orig_node *)hash_find(orig_hash,
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
icmp_packet->orig));
ret = NET_RX_DROP;
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return ret;
}
-static int recv_icmp_ttl_exceeded(struct sk_buff *skb,
- struct batman_if *recv_if, size_t icmp_len)
+static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
+ struct sk_buff *skb, size_t icmp_len)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
struct ethhdr *ethhdr;
return NET_RX_DROP;
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, icmp_packet->orig));
+ hash_find(bat_priv->orig_hash, icmp_packet->orig));
ret = NET_RX_DROP;
if ((orig_node != NULL) &&
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return ret;
}
int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
struct orig_node *orig_node;
/* packet for me */
if (is_my_mac(icmp_packet->dst))
- return recv_my_icmp_packet(skb, recv_if, hdr_size);
+ return recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(skb, recv_if, hdr_size);
+ return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
ret = NET_RX_DROP;
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, icmp_packet->dst));
+ hash_find(bat_priv->orig_hash, icmp_packet->dst));
if ((orig_node != NULL) &&
(orig_node->router != NULL)) {
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return ret;
}
router_orig->orig, ETH_ALEN) == 0) {
primary_orig_node = router_orig;
} else {
- primary_orig_node = hash_find(orig_hash,
+ primary_orig_node = hash_find(bat_priv->orig_hash,
router_orig->primary_addr);
+
if (!primary_orig_node)
return orig_node->router;
}
static int route_unicast_packet(struct sk_buff *skb,
struct batman_if *recv_if, int hdr_size)
{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node;
struct neigh_node *router;
struct batman_if *batman_if;
}
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, unicast_packet->dest));
+ hash_find(bat_priv->orig_hash, unicast_packet->dest));
router = find_router(orig_node, recv_if);
if (!router) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *) skb->data;
+ unicast_packet = (struct unicast_packet *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* decrement ttl */
int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct unicast_frag_packet *unicast_packet;
struct orig_node *orig_node;
struct frag_packet_list_entry *tmp_frag_entry;
if (check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
- unicast_packet = (struct unicast_frag_packet *) skb->data;
+ unicast_packet = (struct unicast_frag_packet *)skb->data;
/* packet for me */
if (is_my_mac(unicast_packet->dest)) {
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, unicast_packet->orig));
+ hash_find(bat_priv->orig_hash, unicast_packet->orig));
if (!orig_node) {
pr_warning("couldn't find orig node for "
"fragmentation\n");
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
return NET_RX_DROP;
}
tmp_frag_entry =
search_frag_packet(&orig_node->frag_list,
- unicast_packet);
+ unicast_packet);
if (!tmp_frag_entry) {
create_frag_entry(&orig_node->frag_list, skb);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
return NET_RX_SUCCESS;
}
skb = merge_frag_packet(&orig_node->frag_list,
- tmp_frag_entry, skb);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ tmp_frag_entry, skb);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if (!skb)
return NET_RX_DROP;
}
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *batman_if)
+int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node;
struct bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
int hdr_size = sizeof(struct bcast_packet);
int32_t seq_diff;
unsigned long flags;
if (bcast_packet->ttl < 2)
return NET_RX_DROP;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, bcast_packet->orig));
+ hash_find(bat_priv->orig_hash, bcast_packet->orig));
if (orig_node == NULL) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
if (get_bit_status(orig_node->bcast_bits,
orig_node->last_bcast_seqno,
ntohl(bcast_packet->seqno))) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
/* check whether the packet is old and the host just restarted. */
if (window_protected(bat_priv, seq_diff,
&orig_node->bcast_seqno_reset)) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* rebroadcast packet */
add_bcast_packet_to_list(bat_priv, skb);
/* broadcast for me */
- interface_rx(batman_if->soft_iface, skb, hdr_size);
+ interface_rx(recv_if->soft_iface, skb, hdr_size);
return NET_RX_SUCCESS;
}
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *batman_if)
+int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
struct vis_packet *vis_packet;
struct ethhdr *ethhdr;
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
int hdr_size = sizeof(struct vis_packet);
/* keep skb linear */
int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bat_packet(struct sk_buff *skb,
- struct batman_if *batman_if);
+int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
struct neigh_node *find_router(struct orig_node *orig_node,
struct batman_if *recv_if);
void update_bonding_candidates(struct bat_priv *bat_priv,
static void send_packet(struct forw_packet *forw_packet)
{
struct batman_if *batman_if;
- struct bat_priv *bat_priv =
- netdev_priv(forw_packet->if_incoming->soft_iface);
+ struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct batman_packet *batman_packet =
(struct batman_packet *)(forw_packet->skb->data);
unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
/* broadcast on every interface */
rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list)
+ list_for_each_entry_rcu(batman_if, &if_list, list) {
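+ /* restrict the broadcast to interfaces attached to this mesh */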
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
send_packet_to_if(forw_packet, batman_if);
+ }
rcu_read_unlock();
}
-static void rebuild_batman_packet(struct batman_if *batman_if)
+static void rebuild_batman_packet(struct bat_priv *bat_priv,
+ struct batman_if *batman_if)
{
int new_len;
unsigned char *new_buff;
struct batman_packet *batman_packet;
- new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
+ new_len = sizeof(struct batman_packet) +
+ (bat_priv->num_local_hna * ETH_ALEN);
new_buff = kmalloc(new_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
sizeof(struct batman_packet));
batman_packet = (struct batman_packet *)new_buff;
- batman_packet->num_hna = hna_local_fill_buffer(
- new_buff + sizeof(struct batman_packet),
- new_len - sizeof(struct batman_packet));
+ batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
+ new_buff + sizeof(struct batman_packet),
+ new_len - sizeof(struct batman_packet));
kfree(batman_if->packet_buff);
batman_if->packet_buff = new_buff;
batman_if->if_status = IF_ACTIVE;
/* if local hna has changed and interface is a primary interface */
- if ((atomic_read(&hna_local_changed)) &&
+ if ((atomic_read(&bat_priv->hna_local_changed)) &&
(batman_if == bat_priv->primary_if))
- rebuild_batman_packet(batman_if);
+ rebuild_batman_packet(bat_priv, batman_if);
/**
* NOTE: packet_buff might just have been re-allocated in
kfree(forw_packet);
}
-static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
+static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
+ struct forw_packet *forw_packet,
unsigned long send_time)
{
unsigned long flags;
INIT_HLIST_NODE(&forw_packet->list);
/* add new packet to packet list */
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
- hlist_add_head(&forw_packet->list, &forw_bcast_list);
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+ hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
goto out;
}
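+ /* we cannot queue broadcasts without a primary interface */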
+ if (!bat_priv->primary_if)
+ goto out;
+
forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
if (!forw_packet)
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
- _add_bcast_packet_to_list(forw_packet, 1);
+ _add_bcast_packet_to_list(bat_priv, forw_packet, 1);
return NETDEV_TX_OK;
packet_free:
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
struct sk_buff *skb1;
- struct bat_priv *bat_priv;
+ struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
- if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
/* rebroadcast packet */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
- send_skb_packet(skb1,
- batman_if, broadcast_addr);
+ send_skb_packet(skb1, batman_if, broadcast_addr);
}
rcu_read_unlock();
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
- _add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
+ _add_bcast_packet_to_list(bat_priv, forw_packet,
+ ((5 * HZ) / 1000));
return;
}
out:
- bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
forw_packet_free(forw_packet);
atomic_inc(&bat_priv->bcast_queue_left);
}
unsigned long flags;
struct bat_priv *bat_priv;
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
- if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
send_packet(forw_packet);
schedule_own_packet(forw_packet->if_incoming);
out:
- bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
-
/* don't count own packet */
if (!forw_packet->own)
atomic_inc(&bat_priv->batman_queue_left);
forw_packet_free(forw_packet);
}
-void purge_outstanding_packets(struct batman_if *batman_if)
+void purge_outstanding_packets(struct bat_priv *bat_priv,
+ struct batman_if *batman_if)
{
- struct bat_priv *bat_priv;
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
unsigned long flags;
- if (batman_if->soft_iface) {
- bat_priv = netdev_priv(batman_if->soft_iface);
-
- if (batman_if)
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets(): %s\n",
- batman_if->dev);
- else
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets()\n");
- }
+ if (batman_if)
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets(): %s\n",
+ batman_if->dev);
+ else
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets()\n");
/* free bcast list */
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &forw_bcast_list, list) {
+ &bat_priv->forw_bcast_list, list) {
/**
* if purge_outstanding_packets() was called with an argument
(forw_packet->if_incoming != batman_if))
continue;
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/**
* send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
}
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/* free batman packet list */
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &forw_bat_list, list) {
+ &bat_priv->forw_bat_list, list) {
/**
* if purge_outstanding_packets() was called with an argument
(forw_packet->if_incoming != batman_if))
continue;
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/**
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
}
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
}
struct batman_if *if_outgoing);
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
void send_outstanding_bat_packet(struct work_struct *work);
-void purge_outstanding_packets(struct batman_if *batman_if);
+void purge_outstanding_packets(struct bat_priv *bat_priv,
+ struct batman_if *batman_if);
#endif /* _NET_BATMAN_ADV_SEND_H_ */
#include <linux/etherdevice.h>
#include "unicast.h"
-static uint32_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
- * broadcast storms */
-unsigned char main_if_addr[ETH_ALEN];
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static void bat_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info);
.set_rx_csum = bat_set_rx_csum
};
-void set_main_if_addr(uint8_t *addr)
-{
- memcpy(main_if_addr, addr, ETH_ALEN);
-}
-
int my_skb_head_push(struct sk_buff *skb, unsigned int len)
{
int result;
* to write freely in that area.
*/
result = skb_cow_head(skb, len);
-
if (result < 0)
return result;
return -EADDRNOTAVAIL;
/* only modify hna-table if it has been initialised before */
- if (atomic_read(&module_state) == MODULE_ACTIVE) {
+ if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
hna_local_remove(bat_priv, dev->dev_addr,
"mac address changed");
hna_local_add(dev, addr->sa_data);
struct bcast_packet *bcast_packet;
int data_len = skb->len, ret;
- if (atomic_read(&module_state) != MODULE_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
soft_iface->trans_start = jiffies;
/* ethernet packet should be broadcasted */
if (is_bcast(ethhdr->h_dest) || is_mcast(ethhdr->h_dest)) {
+ if (!bat_priv->primary_if)
+ goto dropped;
if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
goto dropped;
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
- memcpy(bcast_packet->orig, main_if_addr, ETH_ALEN);
+ memcpy(bcast_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
/* set broadcast sequence number */
- bcast_packet->seqno = htonl(bcast_seqno);
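+ /* the sequence number is consumed atomically, even if queueing fails */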
+ bcast_packet->seqno =
+ htonl(atomic_inc_return(&bat_priv->bcast_seqno));
- /* broadcast packet. on success, increase seqno. */
- if (add_bcast_packet_to_list(bat_priv, skb) == NETDEV_TX_OK)
- bcast_seqno++;
+ add_bcast_packet_to_list(bat_priv, skb);
/* a copy is stored in the bcast list, therefore removing
* the original skb. */
/* unicast packet */
} else {
ret = unicast_send_skb(skb, bat_priv);
- if (ret != 0) {
- bat_priv->stats.tx_dropped++;
- goto end;
- }
+ if (ret != 0)
+ goto dropped_freed;
}
bat_priv->stats.tx_packets++;
goto end;
dropped:
- bat_priv->stats.tx_dropped++;
kfree_skb(skb);
+dropped_freed:
+ bat_priv->stats.tx_dropped++;
end:
return NETDEV_TX_OK;
}
}
ret = register_netdev(soft_iface);
-
if (ret < 0) {
pr_err("Unable to register the batman interface '%s': %i\n",
name, ret);
atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
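+ /* keep the mesh inactive until mesh_init() has set everything up */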
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+ atomic_set(&bat_priv->bcast_seqno, 1);
+ atomic_set(&bat_priv->hna_local_changed, 0);
+
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
ret = sysfs_add_meshif(soft_iface);
-
if (ret < 0)
goto unreg_soft_iface;
ret = debugfs_add_meshif(soft_iface);
-
if (ret < 0)
goto unreg_sysfs;
+ ret = mesh_init(soft_iface);
+ if (ret < 0)
+ goto unreg_debugfs;
+
return soft_iface;
+unreg_debugfs:
+ debugfs_del_meshif(soft_iface);
unreg_sysfs:
sysfs_del_meshif(soft_iface);
unreg_soft_iface:
{
debugfs_del_meshif(soft_iface);
sysfs_del_meshif(soft_iface);
+ mesh_free(soft_iface);
unregister_netdevice(soft_iface);
}
#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-void set_main_if_addr(uint8_t *addr);
int my_skb_head_push(struct sk_buff *skb, unsigned int len);
int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
void interface_rx(struct net_device *soft_iface,
struct net_device *softif_create(char *name);
void softif_destroy(struct net_device *soft_iface);
-extern unsigned char main_if_addr[];
-
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
#include "types.h"
#include "hash.h"
-struct hashtable_t *hna_local_hash;
-static struct hashtable_t *hna_global_hash;
-atomic_t hna_local_changed;
-
-DEFINE_SPINLOCK(hna_local_hash_lock);
-static DEFINE_SPINLOCK(hna_global_hash_lock);
-
static void hna_local_purge(struct work_struct *work);
-static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
static void _hna_global_del_orig(struct bat_priv *bat_priv,
struct hna_global_entry *hna_global_entry,
char *message);
-static void hna_local_start_timer(void)
+static void hna_local_start_timer(struct bat_priv *bat_priv)
{
- queue_delayed_work(bat_event_workqueue, &hna_local_purge_wq, 10 * HZ);
+ INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
-int hna_local_init(void)
+int hna_local_init(struct bat_priv *bat_priv)
{
- if (hna_local_hash)
+ if (bat_priv->hna_local_hash)
return 1;
- hna_local_hash = hash_new(128, compare_orig, choose_orig);
+ bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);
- if (!hna_local_hash)
+ if (!bat_priv->hna_local_hash)
return 0;
- atomic_set(&hna_local_changed, 0);
- hna_local_start_timer();
+ atomic_set(&bat_priv->hna_local_changed, 0);
+ hna_local_start_timer(bat_priv);
return 1;
}
struct hashtable_t *swaphash;
unsigned long flags;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
hna_local_entry =
- ((struct hna_local_entry *)hash_find(hna_local_hash, addr));
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
+ addr));
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
- if (hna_local_entry != NULL) {
+ if (hna_local_entry) {
hna_local_entry->last_seen = jiffies;
return;
}
/* only announce as many hosts as possible in the batman-packet and
space in batman_packet->num_hna That also should give a limit to
MAC-flooding. */
- if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
- (num_hna + 1 > 255)) {
+ if ((bat_priv->num_local_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN)
+ / ETH_ALEN) ||
+ (bat_priv->num_local_hna + 1 > 255)) {
bat_dbg(DBG_ROUTES, bat_priv,
"Can't add new local hna entry (%pM): "
"number of local hna entries exceeds packet size\n",
else
hna_local_entry->never_purge = 0;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- hash_add(hna_local_hash, hna_local_entry);
- num_hna++;
- atomic_set(&hna_local_changed, 1);
+ hash_add(bat_priv->hna_local_hash, hna_local_entry);
+ bat_priv->num_local_hna++;
+ atomic_set(&bat_priv->hna_local_changed, 1);
- if (hna_local_hash->elements * 4 > hna_local_hash->size) {
- swaphash = hash_resize(hna_local_hash,
- hna_local_hash->size * 2);
+ if (bat_priv->hna_local_hash->elements * 4 >
+ bat_priv->hna_local_hash->size) {
+ swaphash = hash_resize(bat_priv->hna_local_hash,
+ bat_priv->hna_local_hash->size * 2);
- if (swaphash == NULL)
+ if (!swaphash)
pr_err("Couldn't resize local hna hash table\n");
else
- hna_local_hash = swaphash;
+ bat_priv->hna_local_hash = swaphash;
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
/* remove address from global hash if present */
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
- hna_global_entry =
- ((struct hna_global_entry *)hash_find(hna_global_hash, addr));
+ hna_global_entry = ((struct hna_global_entry *)
+ hash_find(bat_priv->hna_global_hash, addr));
- if (hna_global_entry != NULL)
+ if (hna_global_entry)
_hna_global_del_orig(bat_priv, hna_global_entry,
"local hna received");
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
}
-int hna_local_fill_buffer(unsigned char *buff, int buff_len)
+int hna_local_fill_buffer(struct bat_priv *bat_priv,
+ unsigned char *buff, int buff_len)
{
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
int i = 0;
unsigned long flags;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
if (buff_len < (i + 1) * ETH_ALEN)
break;
}
/* if we did not get all new local hnas see you next time ;-) */
- if (i == num_hna)
- atomic_set(&hna_local_changed, 0);
-
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ if (i == bat_priv->num_local_hna)
+ atomic_set(&bat_priv->hna_local_changed, 0);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
return i;
}
"announced via HNA:\n",
net_dev->name);
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
- while (hash_iterate(hna_local_hash, &hashit_count))
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
buf_size += 21;
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
return -ENOMEM;
}
buff[0] = '\0';
pos = 0;
- while (hash_iterate(hna_local_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
hna_local_entry = hashit.bucket->data;
pos += snprintf(buff + pos, 22, " * %pM\n",
hna_local_entry->addr);
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
seq_printf(seq, "%s", buff);
kfree(buff);
static void _hna_local_del(void *data, void *arg)
{
+ struct bat_priv *bat_priv = (struct bat_priv *)arg;
+
kfree(data);
- num_hna--;
- atomic_set(&hna_local_changed, 1);
+ bat_priv->num_local_hna--;
+ atomic_set(&bat_priv->hna_local_changed, 1);
}
static void hna_local_del(struct bat_priv *bat_priv,
bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
hna_local_entry->addr, message);
- hash_remove(hna_local_hash, hna_local_entry->addr);
+ hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
_hna_local_del(hna_local_entry, bat_priv);
}
struct hna_local_entry *hna_local_entry;
unsigned long flags;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
hna_local_entry = (struct hna_local_entry *)
- hash_find(hna_local_hash, addr);
+ hash_find(bat_priv->hna_local_hash, addr);
if (hna_local_entry)
hna_local_del(bat_priv, hna_local_entry, message);
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
}
static void hna_local_purge(struct work_struct *work)
{
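+ /* hna_work is embedded in struct bat_priv (see hna_local_start_timer) */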
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, hna_work);
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
unsigned long flags;
unsigned long timeout;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
hna_local_entry = hashit.bucket->data;
timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
- /* if ((!hna_local_entry->never_purge) &&
+
+ if ((!hna_local_entry->never_purge) &&
time_after(jiffies, timeout))
hna_local_del(bat_priv, hna_local_entry,
- "address timed out");*/
+ "address timed out");
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
- hna_local_start_timer();
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ hna_local_start_timer(bat_priv);
}
-void hna_local_free(void)
+void hna_local_free(struct bat_priv *bat_priv)
{
- if (!hna_local_hash)
+ if (!bat_priv->hna_local_hash)
return;
- cancel_delayed_work_sync(&hna_local_purge_wq);
- hash_delete(hna_local_hash, _hna_local_del, NULL);
- hna_local_hash = NULL;
+ cancel_delayed_work_sync(&bat_priv->hna_work);
+ hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
+ bat_priv->hna_local_hash = NULL;
}
-int hna_global_init(void)
+int hna_global_init(struct bat_priv *bat_priv)
{
- if (hna_global_hash)
+ if (bat_priv->hna_global_hash)
return 1;
- hna_global_hash = hash_new(128, compare_orig, choose_orig);
+ bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);
- if (!hna_global_hash)
+ if (!bat_priv->hna_global_hash)
return 0;
return 1;
unsigned char *hna_ptr;
while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
hna_global_entry = (struct hna_global_entry *)
- hash_find(hna_global_hash, hna_ptr);
+ hash_find(bat_priv->hna_global_hash, hna_ptr);
- if (hna_global_entry == NULL) {
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ if (!hna_global_entry) {
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
+ flags);
hna_global_entry =
kmalloc(sizeof(struct hna_global_entry),
"%pM (via %pM)\n",
hna_global_entry->addr, orig_node->orig);
- spin_lock_irqsave(&hna_global_hash_lock, flags);
- hash_add(hna_global_hash, hna_global_entry);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ hash_add(bat_priv->hna_global_hash, hna_global_entry);
}
hna_global_entry->orig_node = orig_node;
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
/* remove address from local hash if present */
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
hna_local_entry = (struct hna_local_entry *)
- hash_find(hna_local_hash, hna_ptr);
+ hash_find(bat_priv->hna_local_hash, hna_ptr);
- if (hna_local_entry != NULL)
+ if (hna_local_entry)
hna_local_del(bat_priv, hna_local_entry,
"global hna received");
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
hna_buff_count++;
}
}
}
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
- if (hna_global_hash->elements * 4 > hna_global_hash->size) {
- swaphash = hash_resize(hna_global_hash,
- hna_global_hash->size * 2);
+ if (bat_priv->hna_global_hash->elements * 4 >
+ bat_priv->hna_global_hash->size) {
+ swaphash = hash_resize(bat_priv->hna_global_hash,
+ bat_priv->hna_global_hash->size * 2);
- if (swaphash == NULL)
+ if (!swaphash)
pr_err("Couldn't resize global hna hash table\n");
else
- hna_global_hash = swaphash;
+ bat_priv->hna_global_hash = swaphash;
}
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
}
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
net_dev->name);
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
- while (hash_iterate(hna_global_hash, &hashit_count))
+ while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
buf_size += 43;
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
return -ENOMEM;
}
buff[0] = '\0';
pos = 0;
- while (hash_iterate(hna_global_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
hna_global_entry = hashit.bucket->data;
pos += snprintf(buff + pos, 44,
hna_global_entry->orig_node->orig);
}
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
seq_printf(seq, "%s", buff);
kfree(buff);
hna_global_entry->addr, hna_global_entry->orig_node->orig,
message);
- hash_remove(hna_global_hash, hna_global_entry->addr);
+ hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
kfree(hna_global_entry);
}
if (orig_node->hna_buff_len == 0)
return;
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
hna_global_entry = (struct hna_global_entry *)
- hash_find(hna_global_hash, hna_ptr);
+ hash_find(bat_priv->hna_global_hash, hna_ptr);
- if ((hna_global_entry != NULL) &&
+ if ((hna_global_entry) &&
(hna_global_entry->orig_node == orig_node))
_hna_global_del_orig(bat_priv, hna_global_entry,
message);
hna_buff_count++;
}
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
orig_node->hna_buff_len = 0;
kfree(orig_node->hna_buff);
kfree(data);
}
-void hna_global_free(void)
+void hna_global_free(struct bat_priv *bat_priv)
{
- if (!hna_global_hash)
+ if (!bat_priv->hna_global_hash)
return;
- hash_delete(hna_global_hash, hna_global_del, NULL);
- hna_global_hash = NULL;
+ hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
+ bat_priv->hna_global_hash = NULL;
}
-struct orig_node *transtable_search(uint8_t *addr)
+struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
struct hna_global_entry *hna_global_entry;
unsigned long flags;
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
hna_global_entry = (struct hna_global_entry *)
- hash_find(hna_global_hash, addr);
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ hash_find(bat_priv->hna_global_hash, addr);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
- if (hna_global_entry == NULL)
+ if (!hna_global_entry)
return NULL;
return hna_global_entry->orig_node;
#include "types.h"
-int hna_local_init(void);
+int hna_local_init(struct bat_priv *bat_priv);
void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
void hna_local_remove(struct bat_priv *bat_priv,
uint8_t *addr, char *message);
-int hna_local_fill_buffer(unsigned char *buff, int buff_len);
+int hna_local_fill_buffer(struct bat_priv *bat_priv,
+ unsigned char *buff, int buff_len);
int hna_local_seq_print_text(struct seq_file *seq, void *offset);
-void hna_local_free(void);
-int hna_global_init(void);
+void hna_local_free(struct bat_priv *bat_priv);
+int hna_global_init(struct bat_priv *bat_priv);
void hna_global_add_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node,
unsigned char *hna_buff, int hna_buff_len);
int hna_global_seq_print_text(struct seq_file *seq, void *offset);
void hna_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, char *message);
-void hna_global_free(void);
-struct orig_node *transtable_search(uint8_t *addr);
-
-extern spinlock_t hna_local_hash_lock;
-extern struct hashtable_t *hna_local_hash;
-extern atomic_t hna_local_changed;
+void hna_global_free(struct bat_priv *bat_priv);
+struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
};
/**
 * orig_node - structure for orig_list maintaining nodes of mesh
 * @primary_addr: host's primary interface address
 * @last_valid: when last packet from this node was received
 * @bcast_seqno_reset: time when the broadcast seqno window was reset
 * @batman_seqno_reset: time when the batman seqno window was reset
 * @flags: for now only VIS_SERVER flag
 * @last_real_seqno: last and best known sequence number
 * @last_ttl: ttl of last received packet
 * @last_bcast_seqno: last broadcast sequence number received by this host
 *
 * @candidates: how many candidates are available
 * @selected: next bonding candidate
*/
struct orig_node {
uint8_t orig[ETH_ALEN];
};
/**
 * neigh_node
 * @last_valid: when last packet via this neighbor was received
*/
struct neigh_node {
struct list_head list;
};
struct bat_priv {
+ atomic_t mesh_state;
struct net_device_stats stats;
atomic_t aggregation_enabled;
atomic_t bonding_enabled;
atomic_t vis_mode;
atomic_t orig_interval;
atomic_t log_level;
+ atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
char num_ifaces;
struct batman_if *primary_if;
struct kobject *mesh_obj;
struct dentry *debug_dir;
+ struct hlist_head forw_bat_list;
+ struct hlist_head forw_bcast_list;
+ struct hlist_head gw_list;
+ struct list_head vis_send_list;
+ struct hashtable_t *orig_hash;
+ struct hashtable_t *hna_local_hash;
+ struct hashtable_t *hna_global_hash;
+ struct hashtable_t *vis_hash;
+ spinlock_t orig_hash_lock;
+ spinlock_t forw_bat_list_lock;
+ spinlock_t forw_bcast_list_lock;
+ spinlock_t hna_lhash_lock;
+ spinlock_t hna_ghash_lock;
+ spinlock_t gw_list_lock;
+ spinlock_t vis_hash_lock;
+ spinlock_t vis_list_lock;
+ int16_t num_local_hna;
+ atomic_t hna_local_changed;
+ struct delayed_work hna_work;
+ struct delayed_work orig_work;
+ struct delayed_work vis_work;
+ struct gw_node *curr_gw;
+ struct vis_info *my_vis_info;
};
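All of the lists and spinlocks added to bat_priv above need per-mesh initialization when the soft interface is created. A sketch of that setup, assuming it runs once at interface creation (mesh_init_locks() is an illustrative name, not a function from this patch):

static void mesh_init_locks(struct bat_priv *bat_priv)
{
	spin_lock_init(&bat_priv->orig_hash_lock);
	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->hna_lhash_lock);
	spin_lock_init(&bat_priv->hna_ghash_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->vis_send_list);
}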
struct socket_client {
};
/**
 * forw_packet - structure for forw_list maintaining packets to be
 * sent/forwarded
*/
struct forw_packet {
struct hlist_node list;
struct sk_buff *skb;
};
+struct vis_info {
+ unsigned long first_seen;
+ struct list_head recv_list;
+ /* list of server-neighbors we received a vis-packet
+ * from. we should not reply to them. */
+ struct list_head send_list;
+ struct kref refcount;
+ struct bat_priv *bat_priv;
+ /* this packet might be part of the vis send queue. */
+ struct sk_buff *skb_packet;
+ /* vis_info may follow here */
+} __attribute__((packed));
+
+struct vis_info_entry {
+ uint8_t src[ETH_ALEN];
+ uint8_t dest[ETH_ALEN];
+ uint8_t quality; /* quality = 0 means HNA */
+} __attribute__((packed));
+
+struct recvlist_node {
+ struct list_head list;
+ uint8_t mac[ETH_ALEN];
+};
+
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
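The vis_info_entry records sit directly behind the vis_packet header in info->skb_packet->data, with quality == 0 marking an HNA as noted above. A small sketch of walking that array (count_hna_entries() is hypothetical):

static int count_hna_entries(struct vis_info *info)
{
	struct vis_packet *packet =
		(struct vis_packet *)info->skb_packet->data;
	struct vis_info_entry *entries = (struct vis_info_entry *)
		((char *)packet + sizeof(struct vis_packet));
	int i, hna = 0;

	for (i = 0; i < packet->entries; i++)
		if (entries[i].quality == 0)	/* quality 0 marks an HNA */
			hna++;

	return hna;
}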
struct sk_buff *skb)
{
struct unicast_frag_packet *up =
- (struct unicast_frag_packet *) skb->data;
+ (struct unicast_frag_packet *)skb->data;
struct sk_buff *tmp_skb;
/* set skb to the first part and tmp_skb to the second part */
{
struct frag_packet_list_entry *tfp;
struct unicast_frag_packet *up =
- (struct unicast_frag_packet *) skb->data;
+ (struct unicast_frag_packet *)skb->data;
/* free and oldest packets stand at the end */
tfp = list_entry((head)->prev, typeof(*tfp), list);
if (tfp->seqno == ntohs(up->seqno))
goto mov_tail;
- tmp_up = (struct unicast_frag_packet *) tfp->skb->data;
+ tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
if (tfp->seqno == search_seqno) {
uint8_t dstaddr[6];
unsigned long flags;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
/* get routing information */
- orig_node = ((struct orig_node *)hash_find(orig_hash, ethhdr->h_dest));
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
+ ethhdr->h_dest));
/* check for hna host */
if (!orig_node)
- orig_node = transtable_search(ethhdr->h_dest);
+ orig_node = transtable_search(bat_priv, ethhdr->h_dest);
router = find_router(orig_node, NULL);
batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if (batman_if->if_status != IF_ACTIVE)
goto dropped;
return 0;
unlock:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
dropped:
kfree_skb(skb);
return 1;
#include "hard-interface.h"
#include "hash.h"
+#define MAX_VIS_PACKET_SIZE 1000
+
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
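seq_before()/seq_after() compare wrapping sequence numbers: the unsigned difference x - y is computed in the operand's own width, and x counts as older when that difference lands in the upper half of the value range (strictly above the sign bit). The same test written out for uint32_t as a runnable userspace program:

#include <stdint.h>
#include <stdio.h>

/* The kernel macros above, written out for a fixed 32-bit width:
 * x is "before" y when (x - y), computed modulo 2^32, is strictly
 * greater than the sign-bit value 0x80000000. */
static int seq32_before(uint32_t x, uint32_t y)
{
	uint32_t diff = x - y;

	return diff > 0x80000000u;
}

int main(void)
{
	printf("%d\n", seq32_before(5, 10));		/* 1: plain case */
	printf("%d\n", seq32_before(0xfffffffeu, 3));	/* 1: wraparound */
	printf("%d\n", seq32_before(3, 0xfffffffeu));	/* 0 */
	return 0;
}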
-#define MAX_VIS_PACKET_SIZE 1000
-
-static struct hashtable_t *vis_hash;
-static DEFINE_SPINLOCK(vis_hash_lock);
-static DEFINE_SPINLOCK(recv_list_lock);
-static struct vis_info *my_vis_info;
-static struct list_head send_list; /* always locked with vis_hash_lock */
-
-static void start_vis_timer(void);
+static void start_vis_timer(struct bat_priv *bat_priv);
/* free the info */
static void free_info(struct kref *ref)
{
struct vis_info *info = container_of(ref, struct vis_info, refcount);
+ struct bat_priv *bat_priv = info->bat_priv;
struct recvlist_node *entry, *tmp;
unsigned long flags;
list_del_init(&info->send_list);
- spin_lock_irqsave(&recv_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
- spin_unlock_irqrestore(&recv_list_lock, flags);
+
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
kfree_skb(info->skb_packet);
kfree(info);
}
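free_info() is only ever reached through kref_put(): each list that links an info holds one reference, taken with kref_get() at insertion time, so the struct is freed exactly once when the last holder lets go. The matching unlink half, roughly what this patch's send_list_del() does (the helper below is written out for illustration only):

static void send_list_drop(struct vis_info *info)
{
	if (!list_empty(&info->send_list)) {
		list_del_init(&info->send_list);
		kref_put(&info->refcount, free_info);
	}
}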
/* Compare two vis packets, used by the hashing algorithm */
buf_size = 1;
/* Estimate length */
- spin_lock_irqsave(&vis_hash_lock, flags);
- while (hash_iterate(vis_hash, &hashit_count)) {
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ while (hash_iterate(bat_priv->vis_hash, &hashit_count)) {
info = hashit_count.bucket->data;
packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
return -ENOMEM;
}
buff[0] = '\0';
buff_pos = 0;
- while (hash_iterate(vis_hash, &hashit)) {
+ while (hash_iterate(bat_priv->vis_hash, &hashit)) {
info = hashit.bucket->data;
packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
}
}
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
seq_printf(seq, "%s", buff);
kfree(buff);
/* add the info packet to the send list, if it was not
* already linked in. */
-static void send_list_add(struct vis_info *info)
+static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
if (list_empty(&info->send_list)) {
kref_get(&info->refcount);
- list_add_tail(&info->send_list, &send_list);
+ list_add_tail(&info->send_list, &bat_priv->vis_send_list);
}
}
}
/* tries to add one entry to the receive list. */
-static void recv_list_add(struct list_head *recv_list, char *mac)
+static void recv_list_add(struct bat_priv *bat_priv,
+ struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
unsigned long flags;
return;
memcpy(entry->mac, mac, ETH_ALEN);
- spin_lock_irqsave(&recv_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
list_add_tail(&entry->list, recv_list);
- spin_unlock_irqrestore(&recv_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
}
/* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct list_head *recv_list, char *mac)
+static int recv_list_is_in(struct bat_priv *bat_priv,
+ struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
unsigned long flags;
- spin_lock_irqsave(&recv_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
list_for_each_entry(entry, recv_list, list) {
if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
- spin_unlock_irqrestore(&recv_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock,
+ flags);
return 1;
}
}
- spin_unlock_irqrestore(&recv_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
return 0;
}
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
* broken...). vis hash must be locked outside. is_new is set when the packet
* is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct vis_packet *vis_packet,
+static struct vis_info *add_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
int vis_info_len, int *is_new,
int make_broadcast)
{
*is_new = 0;
/* sanity check */
- if (vis_hash == NULL)
+ if (!bat_priv->vis_hash)
return NULL;
/* see if the packet is already in vis_hash */
sizeof(struct vis_packet));
memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
- old_info = hash_find(vis_hash, &search_elem);
+ old_info = hash_find(bat_priv->vis_hash, &search_elem);
kfree_skb(search_elem.skb_packet);
if (old_info != NULL) {
old_packet = (struct vis_packet *)old_info->skb_packet->data;
if (!seq_after(ntohl(vis_packet->seqno),
- ntohl(old_packet->seqno))) {
+ ntohl(old_packet->seqno))) {
if (old_packet->seqno == vis_packet->seqno) {
- recv_list_add(&old_info->recv_list,
+ recv_list_add(bat_priv, &old_info->recv_list,
vis_packet->sender_orig);
return old_info;
} else {
}
}
/* remove old entry */
- hash_remove(vis_hash, old_info);
+ hash_remove(bat_priv->vis_hash, old_info);
send_list_del(old_info);
kref_put(&old_info->refcount, free_info);
}
info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
- if (info == NULL)
+ if (!info)
return NULL;
info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
+ info->bat_priv = bat_priv;
memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
/* initialize and add new packet. */
if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
packet->entries = vis_info_len / sizeof(struct vis_info_entry);
- recv_list_add(&info->recv_list, packet->sender_orig);
+ recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */
- if (hash_add(vis_hash, info) < 0) {
+ if (hash_add(bat_priv->vis_hash, info) < 0) {
/* did not work (for some reason) */
kref_put(&old_info->refcount, free_info);
info = NULL;
make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
- spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
- if (info == NULL)
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ info = add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, make_broadcast);
+ if (!info)
goto end;
/* only if we are server ourselves and packet is newer than the one in
* hash.*/
if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
- send_list_add(info);
+ send_list_add(bat_priv, info);
end:
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
}
/* handle an incoming client update packet and schedule forward if needed. */
is_my_mac(vis_packet->target_orig))
are_target = 1;
- spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ info = add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, are_target);
- if (info == NULL)
+ if (!info)
goto end;
/* note that outdated packets will be dropped at this point. */
/* send only if we're the target server or ... */
if (are_target && is_new) {
packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- send_list_add(info);
+ send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */
} else if (!is_my_mac(packet->target_orig)) {
- send_list_add(info);
+ send_list_add(bat_priv, info);
}
end:
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
* address to its address and return the best_tq.
*
* Must be called with the originator hash locked */
-static int find_best_vis_server(struct vis_info *info)
+static int find_best_vis_server(struct bat_priv *bat_priv,
+ struct vis_info *info)
{
HASHIT(hashit);
struct orig_node *orig_node;
packet = (struct vis_packet *)info->skb_packet->data;
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
- if ((orig_node != NULL) &&
- (orig_node->router != NULL) &&
+ if ((orig_node) && (orig_node->router) &&
(orig_node->flags & VIS_SERVER) &&
(orig_node->router->tq_avg > best_tq)) {
best_tq = orig_node->router->tq_avg;
HASHIT(hashit_local);
HASHIT(hashit_global);
struct orig_node *orig_node;
- struct vis_info *info = (struct vis_info *)my_vis_info;
+ struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
struct vis_info_entry *entry;
struct hna_local_entry *hna_local_entry;
info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
packet->ttl = TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
skb_trim(info->skb_packet, sizeof(struct vis_packet));
if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
- best_tq = find_best_vis_server(info);
+ best_tq = find_best_vis_server(bat_priv, info);
+
if (best_tq < 0) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
return -1;
}
}
- while (hash_iterate(orig_hash, &hashit_global)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit_global)) {
orig_node = hashit_global.bucket->data;
- if (orig_node->router != NULL
- && compare_orig(orig_node->router->addr,
- orig_node->orig)
- && (orig_node->router->if_incoming->if_status ==
- IF_ACTIVE)
- && orig_node->router->tq_avg > 0) {
-
- /* fill one entry into buffer. */
- entry = (struct vis_info_entry *)
+
+ if (!orig_node->router)
+ continue;
+
+ if (!compare_orig(orig_node->router->addr, orig_node->orig))
+ continue;
+
+ if (orig_node->router->if_incoming->if_status != IF_ACTIVE)
+ continue;
+
+ if (orig_node->router->tq_avg < 1)
+ continue;
+
+ /* fill one entry into buffer. */
+ entry = (struct vis_info_entry *)
skb_put(info->skb_packet, sizeof(*entry));
- memcpy(entry->src,
- orig_node->router->if_incoming->net_dev->dev_addr,
- ETH_ALEN);
- memcpy(entry->dest, orig_node->orig, ETH_ALEN);
- entry->quality = orig_node->router->tq_avg;
- packet->entries++;
-
- if (vis_packet_full(info)) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- return 0;
- }
+ memcpy(entry->src,
+ orig_node->router->if_incoming->net_dev->dev_addr,
+ ETH_ALEN);
+ memcpy(entry->dest, orig_node->orig, ETH_ALEN);
+ entry->quality = orig_node->router->tq_avg;
+ packet->entries++;
+
+ if (vis_packet_full(info)) {
+ spin_unlock_irqrestore(
+ &bat_priv->orig_hash_lock, flags);
+ return 0;
}
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- spin_lock_irqsave(&hna_local_hash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit_local)) {
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit_local)) {
hna_local_entry = hashit_local.bucket->data;
entry = (struct vis_info_entry *)skb_put(info->skb_packet,
sizeof(*entry));
packet->entries++;
if (vis_packet_full(info)) {
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock,
+ flags);
return 0;
}
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
return 0;
}
/* free old vis packets. Must be called with this vis_hash_lock
* held */
-static void purge_vis_packets(void)
+static void purge_vis_packets(struct bat_priv *bat_priv)
{
HASHIT(hashit);
struct vis_info *info;
- while (hash_iterate(vis_hash, &hashit)) {
+ while (hash_iterate(bat_priv->vis_hash, &hashit)) {
info = hashit.bucket->data;
- if (info == my_vis_info) /* never purge own data. */
+
+ /* never purge own data. */
+ if (info == bat_priv->my_vis_info)
continue;
+
if (time_after(jiffies,
info->first_seen + VIS_TIMEOUT * HZ)) {
- hash_remove_bucket(vis_hash, &hashit);
+ hash_remove_bucket(bat_priv->vis_hash, &hashit);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
}
}
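The expiry test is the usual jiffies idiom: time_after() copes with jiffies wraparound just as seq_before() copes with sequence-number wraparound. The condition in isolation, assuming kernel context (vis_info_expired() is an illustrative name):

static bool vis_info_expired(struct vis_info *info)
{
	return time_after(jiffies, info->first_seen + VIS_TIMEOUT * HZ);
}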
-static void broadcast_vis_packet(struct vis_info *info)
+static void broadcast_vis_packet(struct bat_priv *bat_priv,
+ struct vis_info *info)
{
HASHIT(hashit);
struct orig_node *orig_node;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- spin_lock_irqsave(&orig_hash_lock, flags);
+
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
packet = (struct vis_packet *)info->skb_packet->data;
/* send to all routers in range. */
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
/* if it's a vis server and reachable, send it. */
continue;
/* don't send it if we already received the packet from
* this node. */
- if (recv_list_is_in(&info->recv_list, orig_node->orig))
+ if (recv_list_is_in(bat_priv, &info->recv_list,
+ orig_node->orig))
continue;
memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, batman_if, dstaddr);
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
+
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
-static void unicast_vis_packet(struct vis_info *info)
+static void unicast_vis_packet(struct bat_priv *bat_priv,
+ struct vis_info *info)
{
struct orig_node *orig_node;
struct sk_buff *skb;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
packet = (struct vis_packet *)info->skb_packet->data;
- orig_node = ((struct orig_node *)hash_find(orig_hash,
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
packet->target_orig));
if ((!orig_node) || (!orig_node->router))
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
return;
out:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
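broadcast_vis_packet() and unicast_vis_packet() share one lock discipline: snapshot if_incoming and the next-hop address while holding orig_hash_lock, release the lock, then clone and transmit, since sending must not happen under the spinlock. The shared pattern as a single hypothetical helper (send_to_router() is not in the patch):

static void send_to_router(struct bat_priv *bat_priv,
			   struct orig_node *orig_node,
			   struct vis_info *info)
{
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct sk_buff *skb;
	unsigned long flags;

	/* snapshot routing data under the lock ... */
	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
	batman_if = orig_node->router->if_incoming;
	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);

	/* ... then transmit without holding it */
	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, batman_if, dstaddr);
}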
/* only send one vis packet. called from send_vis_packets() */
-static void send_vis_packet(struct vis_info *info)
+static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
struct vis_packet *packet;
return;
}
- memcpy(packet->sender_orig, main_if_addr, ETH_ALEN);
+ memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
+ ETH_ALEN);
packet->ttl--;
if (is_bcast(packet->target_orig))
- broadcast_vis_packet(info);
+ broadcast_vis_packet(bat_priv, info);
else
- unicast_vis_packet(info);
+ unicast_vis_packet(bat_priv, info);
packet->ttl++; /* restore TTL */
}
/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, vis_work);
struct vis_info *info, *temp;
unsigned long flags;
- /* struct bat_priv *bat_priv = netdev_priv(soft_device); */
-
- spin_lock_irqsave(&vis_hash_lock, flags);
- purge_vis_packets();
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ purge_vis_packets(bat_priv);
- /* if (generate_vis_packet(bat_priv) == 0) {*/
+ if (generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
- /*send_list_add(my_vis_info);
- } */
+ send_list_add(bat_priv, bat_priv->my_vis_info);
+ }
- list_for_each_entry_safe(info, temp, &send_list, send_list) {
+ list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
+ send_list) {
kref_get(&info->refcount);
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
- send_vis_packet(info);
+ if (bat_priv->primary_if)
+ send_vis_packet(bat_priv, info);
- spin_lock_irqsave(&vis_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
- spin_unlock_irqrestore(&vis_hash_lock, flags);
- start_vis_timer();
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ start_vis_timer(bat_priv);
}
-static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets);
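With the global vis_timer_wq removed, the work callback has to recover its mesh instance from the bare work_struct pointer the workqueue passes in, hence the two container_of() steps at the top of send_vis_packets(). A runnable userspace model of that double hop (the struct definitions are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };
struct priv { int id; struct delayed_work vis_work; };

/* the callback only receives &priv.vis_work.work and climbs back up */
static void callback(struct work_struct *work)
{
	struct delayed_work *dw =
		container_of(work, struct delayed_work, work);
	struct priv *p = container_of(dw, struct priv, vis_work);

	printf("recovered priv with id %d\n", p->id);
}

int main(void)
{
	struct priv p = { .id = 42 };

	callback(&p.vis_work.work);
	return 0;
}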
/* init the vis server. this may only be called when if_list is already
* initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(void)
+int vis_init(struct bat_priv *bat_priv)
{
struct vis_packet *packet;
unsigned long flags;
- if (vis_hash)
+
+ if (bat_priv->vis_hash)
return 1;
- spin_lock_irqsave(&vis_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
- if (!vis_hash) {
+ bat_priv->vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
+ if (!bat_priv->vis_hash) {
pr_err("Can't initialize vis_hash\n");
goto err;
}
- my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
- if (!my_vis_info) {
+ bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+ if (!bat_priv->my_vis_info) {
pr_err("Can't initialize vis packet\n");
goto err;
}
- my_vis_info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
+ bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
+ sizeof(struct vis_packet) +
MAX_VIS_PACKET_SIZE +
sizeof(struct ethhdr));
- if (!my_vis_info->skb_packet)
+ if (!bat_priv->my_vis_info->skb_packet)
goto free_info;
- skb_reserve(my_vis_info->skb_packet, sizeof(struct ethhdr));
- packet = (struct vis_packet *)skb_put(my_vis_info->skb_packet,
- sizeof(struct vis_packet));
+
+ skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
+ packet = (struct vis_packet *)skb_put(
+ bat_priv->my_vis_info->skb_packet,
+ sizeof(struct vis_packet));
/* prefill the vis info */
- my_vis_info->first_seen = jiffies - msecs_to_jiffies(VIS_INTERVAL);
- INIT_LIST_HEAD(&my_vis_info->recv_list);
- INIT_LIST_HEAD(&my_vis_info->send_list);
- kref_init(&my_vis_info->refcount);
+ bat_priv->my_vis_info->first_seen = jiffies -
+ msecs_to_jiffies(VIS_INTERVAL);
+ INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
+ INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
+ kref_init(&bat_priv->my_vis_info->refcount);
+ bat_priv->my_vis_info->bat_priv = bat_priv;
packet->version = COMPAT_VERSION;
packet->packet_type = BAT_VIS;
packet->ttl = TTL;
packet->seqno = 0;
packet->entries = 0;
- INIT_LIST_HEAD(&send_list);
-
- memcpy(packet->vis_orig, main_if_addr, ETH_ALEN);
- memcpy(packet->sender_orig, main_if_addr, ETH_ALEN);
+ INIT_LIST_HEAD(&bat_priv->vis_send_list);
- if (hash_add(vis_hash, my_vis_info) < 0) {
+ if (hash_add(bat_priv->vis_hash, bat_priv->my_vis_info) < 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
- kref_put(&my_vis_info->refcount, free_info);
+ kref_put(&bat_priv->my_vis_info->refcount, free_info);
goto err;
}
- spin_unlock_irqrestore(&vis_hash_lock, flags);
- start_vis_timer();
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ start_vis_timer(bat_priv);
return 1;
free_info:
- kfree(my_vis_info);
- my_vis_info = NULL;
+ kfree(bat_priv->my_vis_info);
+ bat_priv->my_vis_info = NULL;
err:
- spin_unlock_irqrestore(&vis_hash_lock, flags);
- vis_quit();
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ vis_quit(bat_priv);
return 0;
}
}
/* shutdown vis-server */
-void vis_quit(void)
+void vis_quit(struct bat_priv *bat_priv)
{
unsigned long flags;
- if (!vis_hash)
+ if (!bat_priv->vis_hash)
return;
- cancel_delayed_work_sync(&vis_timer_wq);
+ cancel_delayed_work_sync(&bat_priv->vis_work);
- spin_lock_irqsave(&vis_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
/* properly remove, kill timers ... */
- hash_delete(vis_hash, free_info_ref, NULL);
- vis_hash = NULL;
- my_vis_info = NULL;
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
+ bat_priv->vis_hash = NULL;
+ bat_priv->my_vis_info = NULL;
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
}
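vis_init() and vis_quit() now pair per mesh interface, and vis_init() returns 1 on success, 0 on failure. A sketch of the bring-up/teardown pairing under that convention (mesh_activate()/mesh_deactivate() are illustrative names):

static int mesh_activate(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	if (!vis_init(bat_priv))
		return -ENOMEM;

	return 0;
}

static void mesh_deactivate(struct net_device *soft_iface)
{
	vis_quit(netdev_priv(soft_iface));
}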
/* schedule packets for (re)transmission */
-static void start_vis_timer(void)
+static void start_vis_timer(struct bat_priv *bat_priv)
{
- queue_delayed_work(bat_event_workqueue, &vis_timer_wq,
- (VIS_INTERVAL * HZ) / 1000);
+ INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
+ msecs_to_jiffies(VIS_INTERVAL));
}
#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */
-struct vis_info {
- unsigned long first_seen;
- struct list_head recv_list;
- /* list of server-neighbors we received a vis-packet
- * from. we should not reply to them. */
- struct list_head send_list;
- struct kref refcount;
- /* this packet might be part of the vis send queue. */
- struct sk_buff *skb_packet;
- /* vis_info may follow here*/
-} __attribute__((packed));
-
-struct vis_info_entry {
- uint8_t src[ETH_ALEN];
- uint8_t dest[ETH_ALEN];
- uint8_t quality; /* quality = 0 means HNA */
-} __attribute__((packed));
-
-struct recvlist_node {
- struct list_head list;
- uint8_t mac[ETH_ALEN];
-};
-
int vis_seq_print_text(struct seq_file *seq, void *offset);
void receive_server_sync_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
void receive_client_update_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len);
-int vis_init(void);
-void vis_quit(void);
+int vis_init(struct bat_priv *bat_priv);
+void vis_quit(struct bat_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_VIS_H_ */