jump label: Introduce static_branch() interface
Introduce:

static __always_inline bool static_branch(struct jump_label_key *key);

instead of the old JUMP_LABEL(key, label) macro. In this way, jump labels become really easy to use:

Define:

        struct jump_label_key jump_key;

Can be used as:

        if (static_branch(&jump_key))
                do unlikely code

enable/disable via:

        jump_label_inc(&jump_key);
        jump_label_dec(&jump_key);

that's it!

For the jump labels disabled case, the static_branch() becomes an atomic_read(), and jump_label_inc()/dec() are simply atomic_inc(), atomic_dec() operations. We show testing results for this change below. Thanks to H. Peter Anvin for suggesting the 'static_branch()' construct.

Since we now require a 'struct jump_label_key *key', we can store a pointer into the jump table addresses. In this way, we can enable/disable jump labels in basically constant time. This change allows us to completely remove the previous hashtable scheme. Thanks to Peter Zijlstra for this re-write.

Testing:

I ran a series of 'tbench 20' runs 5 times (with reboots) for 3 configurations, where tracepoints were disabled:

jump label configured in
avg: 815.6

jump label *not* configured in (using atomic reads)
avg: 800.1

jump label *not* configured in (regular reads)
avg: 803.4

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110316212947.GA8792@redhat.com>
Signed-off-by: Jason Baron <jbaron@redhat.com>
Suggested-by: H. Peter Anvin <hpa@linux.intel.com>
Tested-by: David Daney <ddaney@caviumnetworks.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
committed by: Steven Rostedt
parent: ee5e51f51b
commit: d430d3d7e6
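As a concrete illustration of the interface described in the commit message, here is a minimal usage sketch. The feature name and the foo_*() / do_unlikely_slow_path() helpers are made up for illustration; only struct jump_label_key, static_branch(), jump_label_inc() and jump_label_dec() come from this patch:

        #include <linux/jump_label.h>

        /* one key per rarely-enabled feature; starts disabled */
        static struct jump_label_key trace_foo_key;

        void foo(void)
        {
                if (static_branch(&trace_foo_key))
                        do_unlikely_slow_path();    /* e.g. emit a tracepoint */
        }

        /* flip the branch at runtime; enable/disable nests like a refcount */
        void foo_tracing_start(void) { jump_label_inc(&trace_foo_key); }
        void foo_tracing_stop(void)  { jump_label_dec(&trace_foo_key); }

On architectures with jump label support, the static_branch() test is patched in place at runtime; otherwise it falls back to the atomic_read() described in the commit message.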
@@ -2,43 +2,23 @@
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/jump_label.h>

#ifdef HAVE_JUMP_LABEL

#define JUMP_LABEL_HASH_BITS 6
#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];

/* mutex to protect coming/going of the the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

struct jump_label_entry {
        struct hlist_node hlist;
        struct jump_entry *table;
        int nr_entries;
        /* hang modules off here */
        struct hlist_head modules;
        unsigned long key;
};

struct jump_label_module_entry {
        struct hlist_node hlist;
        struct jump_entry *table;
        int nr_entries;
        struct module *mod;
};

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
@@ -49,6 +29,11 @@ void jump_label_unlock(void)
        mutex_unlock(&jump_label_mutex);
}

bool jump_label_enabled(struct jump_label_key *key)
{
        return !!atomic_read(&key->enabled);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
@@ -64,7 +49,7 @@ static int jump_label_cmp(const void *a, const void *b)
}

static void
sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;
@@ -73,118 +58,25 @@ sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
static void jump_label_update(struct jump_label_key *key, int enable);

void jump_label_inc(struct jump_label_key *key)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct jump_label_entry *e;
        u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);

        head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
        hlist_for_each_entry(e, node, head, hlist) {
                if (key == e->key)
                        return e;
        }
        return NULL;
}

static struct jump_label_entry *
add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
{
        struct hlist_head *head;
        struct jump_label_entry *e;
        u32 hash;

        e = get_jump_label_entry(key);
        if (e)
                return ERR_PTR(-EEXIST);

        e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
        if (!e)
                return ERR_PTR(-ENOMEM);

        hash = jhash((void *)&key, sizeof(jump_label_t), 0);
        head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
        e->key = key;
        e->table = table;
        e->nr_entries = nr_entries;
        INIT_HLIST_HEAD(&(e->modules));
        hlist_add_head(&e->hlist, head);
        return e;
}
static int
build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
{
        struct jump_entry *iter, *iter_begin;
        struct jump_label_entry *entry;
        int count;

        sort_jump_label_entries(start, stop);
        iter = start;
        while (iter < stop) {
                entry = get_jump_label_entry(iter->key);
                if (!entry) {
                        iter_begin = iter;
                        count = 0;
                        while ((iter < stop) &&
                                (iter->key == iter_begin->key)) {
                                iter++;
                                count++;
                        }
                        entry = add_jump_label_entry(iter_begin->key,
                                                        count, iter_begin);
                        if (IS_ERR(entry))
                                return PTR_ERR(entry);
                } else {
                        WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n");
                        return -1;
                }
        }
        return 0;
}

/***
 * jump_label_update - update jump label text
 * @key - key value associated with a a jump label
 * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
 *
 * Will enable/disable the jump for jump label @key, depending on the
 * value of @type.
 *
 */

void jump_label_update(unsigned long key, enum jump_label_type type)
{
        struct jump_entry *iter;
        struct jump_label_entry *entry;
        struct hlist_node *module_node;
        struct jump_label_module_entry *e_module;
        int count;
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        entry = get_jump_label_entry((jump_label_t)key);
        if (entry) {
                count = entry->nr_entries;
                iter = entry->table;
                while (count--) {
                        if (kernel_text_address(iter->code))
                                arch_jump_label_transform(iter, type);
                        iter++;
                }
                /* eanble/disable jump labels in modules */
                hlist_for_each_entry(e_module, module_node, &(entry->modules),
                                        hlist) {
                        count = e_module->nr_entries;
                        iter = e_module->table;
                        while (count--) {
                                if (iter->key &&
                                        kernel_text_address(iter->code))
                                        arch_jump_label_transform(iter, type);
                                iter++;
                        }
                }
        }
        if (atomic_add_return(1, &key->enabled) == 1)
                jump_label_update(key, JUMP_LABEL_ENABLE);
        jump_label_unlock();
}

void jump_label_dec(struct jump_label_key *key)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
                return;

        jump_label_update(key, JUMP_LABEL_DISABLE);
        jump_label_unlock();
}
@@ -197,41 +89,254 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end)
        return 0;
}

#ifdef CONFIG_MODULES

static int module_conflict(void *start, void *end)
static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct hlist_head *head;
        struct hlist_node *node, *node_next, *module_node, *module_node_next;
        struct jump_label_entry *e;
        struct jump_label_module_entry *e_module;
        struct jump_entry *iter;
        int i, count;
        int conflict = 0;

        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
                head = &jump_label_table[i];
                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
                        hlist_for_each_entry_safe(e_module, module_node,
                                                        module_node_next,
                                                        &(e->modules), hlist) {
                                count = e_module->nr_entries;
                                iter = e_module->table;
                                while (count--) {
                                        if (addr_conflict(iter, start, end)) {
                                                conflict = 1;
                                                goto out;
                                        }
                                        iter++;
                                }
                        }
                }
        }
        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }
out:
        return conflict;

        return 0;
}

static void __jump_label_update(struct jump_label_key *key,
                struct jump_entry *entry, int enable)
{
        for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, enable);
        }
}

/*
 * Not all archs need this.
 */
void __weak arch_jump_label_text_poke_early(jump_label_t addr)
{
}

static __init int jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct jump_label_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                arch_jump_label_text_poke_early(iter->code);
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct jump_label_key *)(unsigned long)iter->key;
                atomic_set(&key->enabled, 0);
                key->entries = iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        jump_label_unlock();

        return 0;
}
early_initcall(jump_label_init);
#ifdef CONFIG_MODULES

struct jump_label_mod {
        struct jump_label_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct jump_label_key *key, int enable)
{
        struct jump_label_mod *mod = key->next;

        while (mod) {
                __jump_label_update(key, mod->entries, enable);
                mod = mod->next;
        }
}

/***
 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++)
                arch_jump_label_text_poke_early(iter->code);
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct jump_label_key *key = NULL;
        struct jump_label_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct jump_label_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod) {
                        atomic_set(&key->enabled, 0);
                        key->entries = iter;
                        key->next = NULL;
                        continue;
                }

                jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;

                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                if (jump_label_enabled(key))
                        __jump_label_update(key, iter, JUMP_LABEL_ENABLE);
        }

        return 0;
}
static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct jump_label_key *key = NULL;
        struct jump_label_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct jump_label_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod)
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                        void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
@@ -248,237 +353,29 @@ out:
 */
int jump_label_text_reserved(void *start, void *end)
{
        struct jump_entry *iter;
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __start___jump_table;
        int conflict = 0;
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end)) {
                        conflict = 1;
                        goto out;
                }
                iter++;
        }
        if (ret)
                return ret;

        /* now check modules */
#ifdef CONFIG_MODULES
        conflict = module_conflict(start, end);
        ret = __jump_label_mod_text_reserved(start, end);
#endif
out:
        return conflict;
}

/*
 * Not all archs need this.
 */
void __weak arch_jump_label_text_poke_early(jump_label_t addr)
{
}

static __init int init_jump_label(void)
{
        int ret;
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct jump_entry *iter;

        jump_label_lock();
        ret = build_jump_label_hashtable(__start___jump_table,
                        __stop___jump_table);
        iter = iter_start;
        while (iter < iter_stop) {
                arch_jump_label_text_poke_early(iter->code);
                iter++;
        }
        jump_label_unlock();
        return ret;
}
early_initcall(init_jump_label);

#ifdef CONFIG_MODULES

static struct jump_label_module_entry *
add_jump_label_module_entry(struct jump_label_entry *entry,
                        struct jump_entry *iter_begin,
                        int count, struct module *mod)
{
        struct jump_label_module_entry *e;

        e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
        if (!e)
                return ERR_PTR(-ENOMEM);
        e->mod = mod;
        e->nr_entries = count;
        e->table = iter_begin;
        hlist_add_head(&e->hlist, &entry->modules);
        return e;
}
static int add_jump_label_module(struct module *mod)
{
        struct jump_entry *iter, *iter_begin;
        struct jump_label_entry *entry;
        struct jump_label_module_entry *module_entry;
        int count;

        /* if the module doesn't have jump label entries, just return */
        if (!mod->num_jump_entries)
                return 0;

        sort_jump_label_entries(mod->jump_entries,
                        mod->jump_entries + mod->num_jump_entries);
        iter = mod->jump_entries;
        while (iter < mod->jump_entries + mod->num_jump_entries) {
                entry = get_jump_label_entry(iter->key);
                iter_begin = iter;
                count = 0;
                while ((iter < mod->jump_entries + mod->num_jump_entries) &&
                        (iter->key == iter_begin->key)) {
                        iter++;
                        count++;
                }
                if (!entry) {
                        entry = add_jump_label_entry(iter_begin->key, 0, NULL);
                        if (IS_ERR(entry))
                                return PTR_ERR(entry);
                }
                module_entry = add_jump_label_module_entry(entry, iter_begin,
                                                        count, mod);
                if (IS_ERR(module_entry))
                        return PTR_ERR(module_entry);
        }
        return 0;
}

static void remove_jump_label_module(struct module *mod)
{
        struct hlist_head *head;
        struct hlist_node *node, *node_next, *module_node, *module_node_next;
        struct jump_label_entry *e;
        struct jump_label_module_entry *e_module;
        int i;

        /* if the module doesn't have jump label entries, just return */
        if (!mod->num_jump_entries)
                return;

        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
                head = &jump_label_table[i];
                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
                        hlist_for_each_entry_safe(e_module, module_node,
                                                        module_node_next,
                                                        &(e->modules), hlist) {
                                if (e_module->mod == mod) {
                                        hlist_del(&e_module->hlist);
                                        kfree(e_module);
                                }
                        }
                        if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
                                hlist_del(&e->hlist);
                                kfree(e);
                        }
                }
        }
}

static void remove_jump_label_module_init(struct module *mod)
{
        struct hlist_head *head;
        struct hlist_node *node, *node_next, *module_node, *module_node_next;
        struct jump_label_entry *e;
        struct jump_label_module_entry *e_module;
        struct jump_entry *iter;
        int i, count;

        /* if the module doesn't have jump label entries, just return */
        if (!mod->num_jump_entries)
                return;

        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
                head = &jump_label_table[i];
                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
                        hlist_for_each_entry_safe(e_module, module_node,
                                                        module_node_next,
                                                        &(e->modules), hlist) {
                                if (e_module->mod != mod)
                                        continue;
                                count = e_module->nr_entries;
                                iter = e_module->table;
                                while (count--) {
                                        if (within_module_init(iter->code, mod))
                                                iter->key = 0;
                                        iter++;
                                }
                        }
                }
        }
}
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                        void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = add_jump_label_module(mod);
                if (ret)
                        remove_jump_label_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                remove_jump_label_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                remove_jump_label_module_init(mod);
                jump_label_unlock();
                break;
        }
        return ret;
}

/***
 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
static void jump_label_update(struct jump_label_key *key, int enable)
{
        struct jump_entry *iter;
        struct jump_entry *entry = key->entries;

        /* if the module doesn't have jump label entries, just return */
        if (!mod->num_jump_entries)
                return;
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, enable);

        iter = mod->jump_entries;
        while (iter < mod->jump_entries + mod->num_jump_entries) {
                arch_jump_label_text_poke_early(iter->code);
                iter++;
        }
#ifdef CONFIG_MODULES
        __jump_label_mod_update(key, enable);
#endif
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 0,
};

static __init int init_jump_label_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(init_jump_label_module);

#endif /* CONFIG_MODULES */

#endif
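The commit message above notes that, when jump labels are not configured in, static_branch() degrades to an atomic_read() and jump_label_inc()/dec() to plain atomic operations. A paraphrased sketch of that fallback (not the literal contents of include/linux/jump_label.h, just an illustration of the behavior the tbench numbers compare against) looks roughly like this:

        struct jump_label_key {
                atomic_t enabled;
        };

        /* fallback: a plain load-and-test instead of a patched branch */
        static __always_inline bool static_branch(struct jump_label_key *key)
        {
                if (unlikely(atomic_read(&key->enabled)))
                        return true;
                return false;
        }

        static inline void jump_label_inc(struct jump_label_key *key)
        {
                atomic_inc(&key->enabled);
        }

        static inline void jump_label_dec(struct jump_label_key *key)
        {
                atomic_dec(&key->enabled);
        }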