kernel: remove fastcall in kernel/*
[akpm@linux-foundation.org: coding-style fixes]

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
fc9b52cd8f
commit
7ad5b3a505
@@ -18,7 +18,7 @@ void init_waitqueue_head(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(init_waitqueue_head);
 
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -29,7 +29,7 @@ void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue);
 
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -40,7 +40,7 @@ void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(remove_wait_queue);
  * stops them from bleeding out - it would still allow subsequent
  * loads to move into the critical region).
  */
-void fastcall
+void
 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -82,7 +82,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait);
 
-void fastcall
+void
 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -101,7 +101,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(wake_bit_function);
  * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
  * permitted return codes. Nonzero return codes halt waiting and return.
  */
-int __sched fastcall
+int __sched
 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 			int (*action)(void *), unsigned mode)
 {
@@ -173,7 +173,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 }
 EXPORT_SYMBOL(__wait_on_bit);
 
-int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
+int __sched out_of_line_wait_on_bit(void *word, int bit,
 					int (*action)(void *), unsigned mode)
 {
 	wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -183,7 +183,7 @@ int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit);
 
-int __sched fastcall
+int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 			int (*action)(void *), unsigned mode)
 {
@@ -201,7 +201,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
 
-int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
 					int (*action)(void *), unsigned mode)
 {
 	wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -211,7 +211,7 @@ int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
 
-void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
@@ -236,13 +236,13 @@ EXPORT_SYMBOL(__wake_up_bit);
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.
  */
-void fastcall wake_up_bit(void *word, int bit)
+void wake_up_bit(void *word, int bit)
 {
 	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
 }
 EXPORT_SYMBOL(wake_up_bit);
 
-fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
 {
 	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
 	const struct zone *zone = page_zone(virt_to_page(word));
Reference in New Issue
Block a user