Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
net/netfilter/Kconfig
@@ -424,6 +424,18 @@ config NETFILTER_XT_TARGET_HL
         since you can easily create immortal packets that loop
         forever on the network.

+config NETFILTER_XT_TARGET_IDLETIMER
+       tristate  "IDLETIMER target support"
+       depends on NETFILTER_ADVANCED
+       help
+
+         This option adds the `IDLETIMER' target.  Each matching packet
+         resets the timer associated with label specified when the rule is
+         added.  When the timer expires, it triggers a sysfs notification.
+         The remaining time for expiration can be read via sysfs.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_LED
        tristate '"LED" target support'
        depends on LEDS_CLASS && LEDS_TRIGGERS
net/netfilter/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o

 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
net/netfilter/nf_conntrack_core.c
@@ -62,8 +62,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);

-struct nf_conn nf_conntrack_untracked __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
@@ -1181,10 +1181,21 @@ static void nf_ct_release_dying_list(struct net *net)
        spin_unlock_bh(&nf_conntrack_lock);
 }

+static int untrack_refs(void)
+{
+       int cnt = 0, cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+
+               cnt += atomic_read(&ct->ct_general.use) - 1;
+       }
+       return cnt;
+}
+
 static void nf_conntrack_cleanup_init_net(void)
 {
-       /* wait until all references to nf_conntrack_untracked are dropped */
-       while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+       while (untrack_refs() > 0)
                schedule();

        nf_conntrack_helper_fini();
@@ -1319,10 +1330,19 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);

+void nf_ct_untracked_status_or(unsigned long bits)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               per_cpu(nf_conntrack_untracked, cpu).status |= bits;
+}
+EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
+
 static int nf_conntrack_init_init_net(void)
 {
        int max_factor = 8;
-       int ret;
+       int ret, cpu;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1361,11 +1381,13 @@ static int nf_conntrack_init_init_net(void)
                goto err_extend;
 #endif
        /* Set up fake conntrack: to never be deleted, not in any hashes */
-       write_pnet(&nf_conntrack_untracked.ct_net, &init_net);
-       atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+       for_each_possible_cpu(cpu) {
+               struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+               write_pnet(&ct->ct_net, &init_net);
+               atomic_set(&ct->ct_general.use, 1);
+       }
        /*  - and look it like as a confirmed connection */
-       set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
+       nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
        return 0;

 #ifdef CONFIG_NF_CONNTRACK_ZONES
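Note: the per-cpu conversion above is consumed through two small helpers, nf_ct_untracked_get() and nf_ct_is_untracked(), which later hunks in this commit call but whose definitions live in include/net/netfilter/nf_conntrack.h and are not part of this excerpt. A sketch of what they presumably look like, given DEFINE_PER_CPU() and the IPS_UNTRACKED bit set via nf_ct_untracked_status_or(); treat it as an illustration, not the header's exact text:

/* Illustration only -- the real definitions are in nf_conntrack.h. */
DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);

static inline struct nf_conn *nf_ct_untracked_get(void)
{
        /* fake conntrack entry of the CPU we are currently running on */
        return &__get_cpu_var(nf_conntrack_untracked);
}

static inline int nf_ct_is_untracked(const struct nf_conn *ct)
{
        /* IPS_UNTRACKED is set on every per-cpu entry at init time */
        return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}

Callers therefore no longer compare against a single global &nf_conntrack_untracked; they test the status bit, which works for any of the per-cpu fake entries.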
net/netfilter/nf_conntrack_netlink.c
@@ -480,7 +480,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
        int err;

        /* ignore our fake conntrack entry */
-       if (ct == &nf_conntrack_untracked)
+       if (nf_ct_is_untracked(ct))
                return 0;

        if (events & (1 << IPCT_DESTROY)) {
net/netfilter/nfnetlink_log.c
@@ -66,9 +66,10 @@ struct nfulnl_instance {
        u_int16_t group_num;            /* number of this queue */
        u_int16_t flags;
        u_int8_t copy_mode;
+       struct rcu_head rcu;
 };

-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
 static atomic_t global_seq;

 #define INSTANCE_BUCKETS       16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
        struct nfulnl_instance *inst;

        head = &instance_table[instance_hashfn(group_num)];
-       hlist_for_each_entry(inst, pos, head, hlist) {
+       hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->group_num == group_num)
                        return inst;
        }
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
 {
        struct nfulnl_instance *inst;

-       read_lock_bh(&instances_lock);
+       rcu_read_lock_bh();
        inst = __instance_lookup(group_num);
-       if (inst)
-               instance_get(inst);
-       read_unlock_bh(&instances_lock);
+       if (inst && !atomic_inc_not_zero(&inst->use))
+               inst = NULL;
+       rcu_read_unlock_bh();

        return inst;
 }

+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct nfulnl_instance, rcu));
+       module_put(THIS_MODULE);
+}
+
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-       if (inst && atomic_dec_and_test(&inst->use)) {
-               kfree(inst);
-               module_put(THIS_MODULE);
-       }
+       if (inst && atomic_dec_and_test(&inst->use))
+               call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }

 static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
        struct nfulnl_instance *inst;
        int err;

-       write_lock_bh(&instances_lock);
+       spin_lock_bh(&instances_lock);
        if (__instance_lookup(group_num)) {
                err = -EEXIST;
                goto out_unlock;
@@ -166,32 +171,37 @@ instance_create(u_int16_t group_num, int pid)
        inst->copy_mode = NFULNL_COPY_PACKET;
        inst->copy_range = NFULNL_COPY_RANGE_MAX;

-       hlist_add_head(&inst->hlist,
+       hlist_add_head_rcu(&inst->hlist,
                       &instance_table[instance_hashfn(group_num)]);

-       write_unlock_bh(&instances_lock);
+       spin_unlock_bh(&instances_lock);

        return inst;

 out_unlock:
-       write_unlock_bh(&instances_lock);
+       spin_unlock_bh(&instances_lock);
        return ERR_PTR(err);
 }

+static void __nfulnl_flush(struct nfulnl_instance *inst);
+
+/* called with BH disabled */
 static void
 __instance_destroy(struct nfulnl_instance *inst)
 {
        /* first pull it out of the global list */
-       hlist_del(&inst->hlist);
+       hlist_del_rcu(&inst->hlist);

        /* then flush all pending packets from skb */

-       spin_lock_bh(&inst->lock);
+       spin_lock(&inst->lock);
+
+       /* lockless readers wont be able to use us */
+       inst->copy_mode = NFULNL_COPY_DISABLED;
+
        if (inst->skb)
                __nfulnl_flush(inst);
-       spin_unlock_bh(&inst->lock);
+       spin_unlock(&inst->lock);

        /* and finally put the refcount */
        instance_put(inst);
@@ -200,9 +210,9 @@ __instance_destroy(struct nfulnl_instance *inst)
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-       write_lock_bh(&instances_lock);
+       spin_lock_bh(&instances_lock);
        __instance_destroy(inst);
-       write_unlock_bh(&instances_lock);
+       spin_unlock_bh(&instances_lock);
 }

 static int
@@ -621,6 +631,7 @@ nfulnl_log_packet(u_int8_t pf,
                size += nla_total_size(data_len);
                break;

+       case NFULNL_COPY_DISABLED:
        default:
                goto unlock_and_release;
        }
@@ -674,7 +685,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
                int i;

                /* destroy all instances for this pid */
-               write_lock_bh(&instances_lock);
+               spin_lock_bh(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfulnl_instance *inst;
@@ -686,7 +697,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
                                        __instance_destroy(inst);
                        }
                }
-               write_unlock_bh(&instances_lock);
+               spin_unlock_bh(&instances_lock);
        }
        return NOTIFY_DONE;
 }
@@ -863,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
-                       return instance_table[st->bucket].first;
+                       return rcu_dereference_bh(instance_table[st->bucket].first);
        }
        return NULL;
 }

 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-       h = h->next;
+       h = rcu_dereference_bh(h->next);
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

-               h = instance_table[st->bucket].first;
+               h = rcu_dereference_bh(instance_table[st->bucket].first);
        }
        return h;
 }
@@ -892,9 +903,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 }

 static void *seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(instances_lock)
+       __acquires(rcu_bh)
 {
-       read_lock_bh(&instances_lock);
+       rcu_read_lock_bh();
        return get_idx(seq->private, *pos);
 }

@@ -905,9 +916,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }

 static void seq_stop(struct seq_file *s, void *v)
-       __releases(instances_lock)
+       __releases(rcu_bh)
 {
-       read_unlock_bh(&instances_lock);
+       rcu_read_unlock_bh();
 }

 static int seq_show(struct seq_file *s, void *v)
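Note: the nfnetlink_log changes above replace the reader/writer lock with RCU plus an atomic_inc_not_zero() refcount grab, and defer the actual kfree() to an RCU-BH callback. A minimal, self-contained sketch of that pattern with a hypothetical "obj" type (not part of the patch; it assumes the 2.6.3x-era list/RCU API, where hlist_for_each_entry_rcu() still takes a separate hlist_node cursor):

/* sketch: lockless lookup with refcount, RCU-deferred free */
struct obj {
        struct hlist_node hlist;
        atomic_t use;
        struct rcu_head rcu;
        u16 id;
};

static struct obj *obj_lookup_get(struct hlist_head *head, u16 id)
{
        struct obj *o;
        struct hlist_node *pos;

        rcu_read_lock_bh();
        hlist_for_each_entry_rcu(o, pos, head, hlist) {
                /* only take a reference if the object is still live */
                if (o->id == id && atomic_inc_not_zero(&o->use)) {
                        rcu_read_unlock_bh();
                        return o;
                }
        }
        rcu_read_unlock_bh();
        return NULL;
}

static void obj_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct obj, rcu));
}

static void obj_put(struct obj *o)
{
        /* writers still serialize insert/delete on a spinlock; the free
         * itself waits for readers via call_rcu_bh() */
        if (atomic_dec_and_test(&o->use))
                call_rcu_bh(&o->rcu, obj_free_rcu);
}

Because lookups no longer take instances_lock, an instance being torn down flips copy_mode to NFULNL_COPY_DISABLED first, which is why nfulnl_log_packet() grows the new NFULNL_COPY_DISABLED case.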
net/netfilter/nfnetlink_queue.c
@@ -46,17 +46,19 @@ struct nfqnl_instance {
        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
-       unsigned int queue_total;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

-       unsigned int id_sequence;               /* 'sequence' of pkt ids */
-
        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
-
-       spinlock_t lock;
-
+/*
+ * Following fields are dirtied for each queued packet,
+ * keep them in same cache line if possible.
+ */
+       spinlock_t      lock;
+       unsigned int    queue_total;
+       atomic_t        id_sequence;            /* 'sequence' of pkt ids */
+
        struct list_head queue_list;            /* packets in queue */
 };
@@ -238,32 +240,24 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,

        outdev = entry->outdev;

-       spin_lock_bh(&queue->lock);
-
-       switch ((enum nfqnl_config_mode)queue->copy_mode) {
+       switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
-                   skb_checksum_help(entskb)) {
-                       spin_unlock_bh(&queue->lock);
+                   skb_checksum_help(entskb))
                        return NULL;
-               }
-               if (queue->copy_range == 0
-                   || queue->copy_range > entskb->len)
+
+               data_len = ACCESS_ONCE(queue->copy_range);
+               if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;
-               else
-                       data_len = queue->copy_range;

                size += nla_total_size(data_len);
                break;
        }

-       entry->id = queue->id_sequence++;
-
-       spin_unlock_bh(&queue->lock);
-
        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
@@ -278,6 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

+       entry->id = atomic_inc_return(&queue->id_sequence);
        pmsg.packet_id          = htonl(entry->id);
        pmsg.hw_protocol        = entskb->protocol;
        pmsg.hook               = entry->hook;
@@ -868,7 +863,7 @@ static int seq_show(struct seq_file *s, void *v)
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
-                         inst->id_sequence, 1);
+                         atomic_read(&inst->id_sequence), 1);
 }

 static const struct seq_operations nfqnl_seq_ops = {
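Note: the nfnetlink_queue hunks drop queue->lock around message sizing entirely. An illustration (not taken from the patch) of the two lockless idioms they rely on: configuration fields are sampled once with ACCESS_ONCE() so a concurrent configuration change cannot be observed half-way through the function, and packet ids come from an atomic counter instead of being handed out under the spinlock:

/* sketch: lockless reads used by nfqnl_build_packet_message() */
static unsigned int sample_copy_range(struct nfqnl_instance *queue,
                                      unsigned int pkt_len)
{
        /* read copy_range exactly once; clamp to the packet length */
        unsigned int data_len = ACCESS_ONCE(queue->copy_range);

        if (data_len == 0 || data_len > pkt_len)
                data_len = pkt_len;
        return data_len;
}

static unsigned int next_packet_id(struct nfqnl_instance *queue)
{
        /* ids only need to be unique per queue, so an atomic counter
         * replaces "queue->id_sequence++ under queue->lock" */
        return atomic_inc_return(&queue->id_sequence);
}

The struct reorder in the first hunk keeps the remaining per-packet fields (lock, queue_total, id_sequence, queue_list) together so they can share a cache line.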
net/netfilter/xt_CT.c
@@ -67,7 +67,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
                return -EINVAL;

        if (info->flags & XT_CT_NOTRACK) {
-               ct = &nf_conntrack_untracked;
+               ct = nf_ct_untracked_get();
                atomic_inc(&ct->ct_general.use);
                goto out;
        }
@@ -132,7 +132,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
        struct nf_conn *ct = info->ct;
        struct nf_conn_help *help;

-       if (ct != &nf_conntrack_untracked) {
+       if (!nf_ct_is_untracked(ct)) {
                help = nfct_help(ct);
                if (help)
                        module_put(help->helper->me);
net/netfilter/xt_IDLETIMER.c (new file, 314 lines)
@@ -0,0 +1,314 @@
/*
 * linux/net/netfilter/xt_IDLETIMER.c
 *
 * Netfilter module to trigger a timer when packet matches.
 * After timer expires a kevent will be sent.
 *
 * Copyright (C) 2004, 2010 Nokia Corporation
 * Written by Timo Teras <ext-timo.teras@nokia.com>
 *
 * Converted to x_tables and reworked for upstream inclusion
 * by Luciano Coelho <luciano.coelho@nokia.com>
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_IDLETIMER.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>

struct idletimer_tg_attr {
        struct attribute attr;
        ssize_t (*show)(struct kobject *kobj,
                        struct attribute *attr, char *buf);
};

struct idletimer_tg {
        struct list_head entry;
        struct timer_list timer;
        struct work_struct work;

        struct kobject *kobj;
        struct idletimer_tg_attr attr;

        unsigned int refcnt;
};

static LIST_HEAD(idletimer_tg_list);
static DEFINE_MUTEX(list_mutex);

static struct kobject *idletimer_tg_kobj;

static
struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
        struct idletimer_tg *entry;

        BUG_ON(!label);

        list_for_each_entry(entry, &idletimer_tg_list, entry) {
                if (!strcmp(label, entry->attr.attr.name))
                        return entry;
        }

        return NULL;
}

static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
                                 char *buf)
{
        struct idletimer_tg *timer;
        unsigned long expires = 0;

        mutex_lock(&list_mutex);

        timer = __idletimer_tg_find_by_label(attr->name);
        if (timer)
                expires = timer->timer.expires;

        mutex_unlock(&list_mutex);

        if (time_after(expires, jiffies))
                return sprintf(buf, "%u\n",
                               jiffies_to_msecs(expires - jiffies) / 1000);

        return sprintf(buf, "0\n");
}

static void idletimer_tg_work(struct work_struct *work)
{
        struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
                                                  work);

        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
}

static void idletimer_tg_expired(unsigned long data)
{
        struct idletimer_tg *timer = (struct idletimer_tg *) data;

        pr_debug("timer %s expired\n", timer->attr.attr.name);

        schedule_work(&timer->work);
}

static int idletimer_tg_create(struct idletimer_tg_info *info)
{
        int ret;

        info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
        if (!info->timer) {
                pr_debug("couldn't alloc timer\n");
                ret = -ENOMEM;
                goto out;
        }

        info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
        if (!info->timer->attr.attr.name) {
                pr_debug("couldn't alloc attribute name\n");
                ret = -ENOMEM;
                goto out_free_timer;
        }
        info->timer->attr.attr.mode = S_IRUGO;
        info->timer->attr.show = idletimer_tg_show;

        ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
        if (ret < 0) {
                pr_debug("couldn't add file to sysfs");
                goto out_free_attr;
        }

        list_add(&info->timer->entry, &idletimer_tg_list);

        setup_timer(&info->timer->timer, idletimer_tg_expired,
                    (unsigned long) info->timer);
        info->timer->refcnt = 1;

        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);

        INIT_WORK(&info->timer->work, idletimer_tg_work);

        return 0;

out_free_attr:
        kfree(info->timer->attr.attr.name);
out_free_timer:
        kfree(info->timer);
out:
        return ret;
}

/*
 * The actual xt_tables plugin.
 */
static unsigned int idletimer_tg_target(struct sk_buff *skb,
                                        const struct xt_action_param *par)
{
        const struct idletimer_tg_info *info = par->targinfo;

        pr_debug("resetting timer %s, timeout period %u\n",
                 info->label, info->timeout);

        BUG_ON(!info->timer);

        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);

        return XT_CONTINUE;
}

static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
{
        struct idletimer_tg_info *info = par->targinfo;
        int ret;

        pr_debug("checkentry targinfo%s\n", info->label);

        if (info->timeout == 0) {
                pr_debug("timeout value is zero\n");
                return -EINVAL;
        }

        if (info->label[0] == '\0' ||
            strnlen(info->label,
                    MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
                pr_debug("label is empty or not nul-terminated\n");
                return -EINVAL;
        }

        mutex_lock(&list_mutex);

        info->timer = __idletimer_tg_find_by_label(info->label);
        if (info->timer) {
                info->timer->refcnt++;
                mod_timer(&info->timer->timer,
                          msecs_to_jiffies(info->timeout * 1000) + jiffies);

                pr_debug("increased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
        } else {
                ret = idletimer_tg_create(info);
                if (ret < 0) {
                        pr_debug("failed to create timer\n");
                        mutex_unlock(&list_mutex);
                        return ret;
                }
        }

        mutex_unlock(&list_mutex);
        return 0;
}

static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
{
        const struct idletimer_tg_info *info = par->targinfo;

        pr_debug("destroy targinfo %s\n", info->label);

        mutex_lock(&list_mutex);

        if (--info->timer->refcnt == 0) {
                pr_debug("deleting timer %s\n", info->label);

                list_del(&info->timer->entry);
                del_timer_sync(&info->timer->timer);
                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
                kfree(info->timer->attr.attr.name);
                kfree(info->timer);
        } else {
                pr_debug("decreased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
        }

        mutex_unlock(&list_mutex);
}

static struct xt_target idletimer_tg __read_mostly = {
        .name           = "IDLETIMER",
        .family         = NFPROTO_UNSPEC,
        .target         = idletimer_tg_target,
        .targetsize     = sizeof(struct idletimer_tg_info),
        .checkentry     = idletimer_tg_checkentry,
        .destroy        = idletimer_tg_destroy,
        .me             = THIS_MODULE,
};

static struct class *idletimer_tg_class;

static struct device *idletimer_tg_device;

static int __init idletimer_tg_init(void)
{
        int err;

        idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer");
        err = PTR_ERR(idletimer_tg_class);
        if (IS_ERR(idletimer_tg_class)) {
                pr_debug("couldn't register device class\n");
                goto out;
        }

        idletimer_tg_device = device_create(idletimer_tg_class, NULL,
                                            MKDEV(0, 0), NULL, "timers");
        err = PTR_ERR(idletimer_tg_device);
        if (IS_ERR(idletimer_tg_device)) {
                pr_debug("couldn't register system device\n");
                goto out_class;
        }

        idletimer_tg_kobj = &idletimer_tg_device->kobj;

        err = xt_register_target(&idletimer_tg);
        if (err < 0) {
                pr_debug("couldn't register xt target\n");
                goto out_dev;
        }

        return 0;
out_dev:
        device_destroy(idletimer_tg_class, MKDEV(0, 0));
out_class:
        class_destroy(idletimer_tg_class);
out:
        return err;
}

static void __exit idletimer_tg_exit(void)
{
        xt_unregister_target(&idletimer_tg);

        device_destroy(idletimer_tg_class, MKDEV(0, 0));
        class_destroy(idletimer_tg_class);
}

module_init(idletimer_tg_init);
module_exit(idletimer_tg_exit);

MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
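Note: once a rule with the IDLETIMER target is installed (the userspace iptables extension that sets label and timeout is not part of this commit), the remaining time can be polled from user space. A hypothetical helper, with the sysfs path inferred from class_create(..., "xt_idletimer") and device_create(..., "timers") above and from idletimer_tg_show(), which prints the remaining seconds or 0 once the timer has expired:

/* hypothetical user-space reader; path layout is an assumption */
#include <stdio.h>

static long idletimer_remaining(const char *label)
{
        char path[256];
        long secs = -1;
        FILE *f;

        /* one attribute per label under the "timers" device */
        snprintf(path, sizeof(path),
                 "/sys/class/xt_idletimer/timers/%s", label);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%ld", &secs) != 1)
                secs = -1;
        fclose(f);
        return secs;
}

A sysfs_notify() from idletimer_tg_work() additionally lets user space poll()/select() on that attribute to be woken when the timer expires.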
net/netfilter/xt_NOTRACK.c
@@ -23,7 +23,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
           If there is a real ct entry correspondig to this packet,
           it'll hang aroun till timing out. We don't deal with it
           for performance reasons. JK */
-       skb->nfct = &nf_conntrack_untracked.ct_general;
+       skb->nfct = &nf_ct_untracked_get()->ct_general;
        skb->nfctinfo = IP_CT_NEW;
        nf_conntrack_get(skb->nfct);

net/netfilter/xt_TEE.c
@@ -104,7 +104,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 #ifdef WITH_CONNTRACK
        /* Avoid counting cloned packets towards the original connection. */
        nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_conntrack_untracked.ct_general;
+       skb->nfct     = &nf_ct_untracked_get()->ct_general;
        skb->nfctinfo = IP_CT_NEW;
        nf_conntrack_get(skb->nfct);
 #endif
@@ -177,7 +177,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)

 #ifdef WITH_CONNTRACK
        nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_conntrack_untracked.ct_general;
+       skb->nfct     = &nf_ct_untracked_get()->ct_general;
        skb->nfctinfo = IP_CT_NEW;
        nf_conntrack_get(skb->nfct);
 #endif
net/netfilter/xt_cluster.c
@@ -120,7 +120,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
        if (ct == NULL)
                return false;

-       if (ct == &nf_conntrack_untracked)
+       if (nf_ct_is_untracked(ct))
                return false;

        if (ct->master)
net/netfilter/xt_conntrack.c
@@ -123,11 +123,12 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,

        ct = nf_ct_get(skb, &ctinfo);

-       if (ct == &nf_conntrack_untracked)
-               statebit = XT_CONNTRACK_STATE_UNTRACKED;
-       else if (ct != NULL)
-               statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
-       else
+       if (ct) {
+               if (nf_ct_is_untracked(ct))
+                       statebit = XT_CONNTRACK_STATE_UNTRACKED;
+               else
+                       statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+       } else
                statebit = XT_CONNTRACK_STATE_INVALID;

        if (info->match_flags & XT_CONNTRACK_STATE) {
net/netfilter/xt_sctp.c
@@ -3,6 +3,7 @@
 #include <linux/skbuff.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/sctp/sctp.h>
 #include <linux/sctp.h>

 #include <linux/netfilter/x_tables.h>
@@ -67,7 +68,7 @@ match_packet(const struct sk_buff *skb,
                         ++i, offset, sch->type, htons(sch->length),
                         sch->flags);
 #endif
-               offset += (ntohs(sch->length) + 3) & ~3;
+               offset += WORD_ROUND(ntohs(sch->length));

                pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);

net/netfilter/xt_socket.c
@@ -127,7 +127,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
         * reply packet of an established SNAT-ted connection. */

        ct = nf_ct_get(skb, &ctinfo);
-       if (ct && (ct != &nf_conntrack_untracked) &&
+       if (ct && !nf_ct_is_untracked(ct) &&
            ((iph->protocol != IPPROTO_ICMP &&
              ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
             (iph->protocol == IPPROTO_ICMP &&
net/netfilter/xt_state.c
@@ -26,14 +26,16 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par)
        const struct xt_state_info *sinfo = par->matchinfo;
        enum ip_conntrack_info ctinfo;
        unsigned int statebit;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

-       if (nf_ct_is_untracked(skb))
-               statebit = XT_STATE_UNTRACKED;
-       else if (!nf_ct_get(skb, &ctinfo))
+       if (!ct)
                statebit = XT_STATE_INVALID;
-       else
-               statebit = XT_STATE_BIT(ctinfo);
-
+       else {
+               if (nf_ct_is_untracked(ct))
+                       statebit = XT_STATE_UNTRACKED;
+               else
+                       statebit = XT_STATE_BIT(ctinfo);
+       }
        return (sinfo->statemask & statebit);
 }

net/netfilter/xt_statistic.c
@@ -18,8 +18,8 @@
 #include <linux/netfilter/x_tables.h>

 struct xt_statistic_priv {
-       uint32_t count;
-};
+       atomic_t count;
+} ____cacheline_aligned_in_smp;

 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
 MODULE_ALIAS("ipt_statistic");
 MODULE_ALIAS("ip6t_statistic");

-static DEFINE_SPINLOCK(nth_lock);
-
 static bool
 statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_statistic_info *info = par->matchinfo;
        bool ret = info->flags & XT_STATISTIC_INVERT;
+       int nval, oval;

        switch (info->mode) {
        case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        ret = !ret;
                break;
        case XT_STATISTIC_MODE_NTH:
-               spin_lock_bh(&nth_lock);
-               if (info->master->count++ == info->u.nth.every) {
-                       info->master->count = 0;
+               do {
+                       oval = atomic_read(&info->master->count);
+                       nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+               } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+               if (nval == 0)
                        ret = !ret;
-               }
-               spin_unlock_bh(&nth_lock);
                break;
        }

@@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
        info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
        if (info->master == NULL)
                return -ENOMEM;
-       info->master->count = info->u.nth.count;
+       atomic_set(&info->master->count, info->u.nth.count);

        return 0;
 }
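Note: the xt_statistic change replaces the global nth_lock with a compare-and-swap loop on the per-rule counter. A stand-alone sketch of that loop, mirroring the hunk above (hypothetical helper name, not part of the patch): each caller re-reads the shared counter, computes the wrapped successor, and publishes it only if nobody raced in between, so exactly one in every (every + 1) calls observes the wrap to zero and matches.

/* sketch: lockless "match every Nth packet" counter */
static bool nth_match(atomic_t *count, unsigned int every)
{
        int oval, nval;

        do {
                oval = atomic_read(count);
                nval = (oval == every) ? 0 : oval + 1;
                /* retry if another CPU updated the counter meanwhile */
        } while (atomic_cmpxchg(count, oval, nval) != oval);

        return nval == 0;
}

The ____cacheline_aligned_in_smp on struct xt_statistic_priv keeps concurrently-updated counters of different rules from false-sharing a cache line.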