Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
@@ -111,6 +111,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;

extern struct acpi_mcfg_allocation *pci_mmcfg_config;
extern int pci_mmcfg_config_num;
@@ -132,6 +132,7 @@ struct bio {
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
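A minimal usage sketch (not part of the patch) of how these macros cooperate, assuming a hypothetical helper bio_set_pool_idx() that stores an index in the top bits of bi_flags:

static inline void bio_set_pool_idx(struct bio *bio, unsigned long idx)
{
	/* clear any previous pool index held in the top BIO_POOL_BITS bits */
	bio->bi_flags &= ~(BIO_POOL_NONE << BIO_POOL_OFFSET);
	/* store the new index; BIO_POOL_IDX(bio) recovers it later */
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
}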
@@ -112,6 +112,25 @@ static inline unsigned fls_long(unsigned long l)
	return fls64(l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
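A brief usage sketch (not part of the patch); the mask source and per-bit handler below are hypothetical, and the explicit non-zero check mirrors the requirement stated in the comment above:

extern void handle_pending_bit(unsigned long bit);	/* hypothetical handler */

static void dispatch_lowest_pending(u64 pending)
{
	unsigned long bit;

	if (!pending)
		return;			/* __ffs64() is undefined for 0 */
	bit = __ffs64(pending);		/* index of the lowest set bit, 0..63 */
	handle_pending_bit(bit);
}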
@@ -118,6 +118,7 @@ enum rq_flag_bits {
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_NR_BITS,		/* stops here */
};
@@ -145,6 +146,7 @@ enum rq_flag_bits {
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)

#define BLK_MAX_CDB 16

@@ -598,6 +600,7 @@ enum {
	blk_failfast_transport(rq) || \
	blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)

#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
@@ -143,7 +143,9 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
 * 400-499: Perfect
 *	The ideal clocksource. A must-use where
 *	available.
 * @read: returns a cycle value
 * @read: returns a cycle value, passes clocksource as argument
 * @enable: optional function to enable the clocksource
 * @disable: optional function to disable the clocksource
 * @mask: bitmask for two's complement
 *	subtraction of non 64 bit counters
 * @mult: cycle to nanosecond multiplier (adjusted by NTP)
@@ -162,7 +164,9 @@ struct clocksource {
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(void);
	cycle_t (*read)(struct clocksource *cs);
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	cycle_t mask;
	u32 mult;
	u32 mult_orig;
@@ -271,7 +275,34 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 */
static inline cycle_t clocksource_read(struct clocksource *cs)
{
	return cs->read();
	return cs->read(cs);
}

/**
 * clocksource_enable: - enable clocksource
 * @cs: pointer to clocksource
 *
 * Enables the specified clocksource. The clocksource callback
 * function should start up the hardware and setup mult and field
 * members of struct clocksource to reflect hardware capabilities.
 */
static inline int clocksource_enable(struct clocksource *cs)
{
	return cs->enable ? cs->enable(cs) : 0;
}

/**
 * clocksource_disable: - disable clocksource
 * @cs: pointer to clocksource
 *
 * Disables the specified clocksource. The clocksource callback
 * function should power down the now unused hardware block to
 * save power.
 */
static inline void clocksource_disable(struct clocksource *cs)
{
	if (cs->disable)
		cs->disable(cs);
}

/**
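A driver-side sketch (not part of the patch) showing the new read(struct clocksource *) signature together with the enable/read/disable helpers above; the clocksource below is hypothetical and simply returns jiffies in place of a real hardware counter read:

static cycle_t example_cs_read(struct clocksource *cs)
{
	return (cycle_t)jiffies;	/* stand-in for a hardware counter read */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 100,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
};

/* Core-side use of the accessors defined above: */
static cycle_t example_sample(void)
{
	cycle_t now = 0;

	if (!clocksource_enable(&example_cs))
		now = clocksource_read(&example_cs);
	clocksource_disable(&example_cs);
	return now;
}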
@@ -551,6 +551,7 @@ extern int (*platform_notify_remove)(struct device *dev);
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);

extern void wait_for_device_probe(void);

/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
@@ -2299,9 +2299,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *);

extern int vfs_stat(char __user *, struct kstat *);
extern int vfs_lstat(char __user *, struct kstat *);
extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , char __user *, struct kstat *, int);

extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
			unsigned long arg);
@@ -2449,7 +2448,7 @@ struct ctl_table;
int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
		  void __user *buffer, size_t *lenp, loff_t *ppos);

int get_filesystem_list(char * buf);
int __init get_filesystem_list(char *buf);

#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
@@ -79,6 +79,10 @@ struct fsl_spi_platform_data {
	u16 max_chipselect;
	void (*cs_control)(struct spi_device *spi, bool on);
	u32 sysclk;

	/* Legacy hooks, used by mpc52xx_psc_spi driver. */
	void (*activate_cs)(u8 cs, u8 polarity);
	void (*deactivate_cs)(u8 cs, u8 polarity);
};

struct mpc8xx_pcmcia_ops {
@@ -214,6 +214,7 @@ static inline void disk_put_part(struct hd_struct *part)
#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */

struct disk_part_iter {
	struct gendisk *disk;
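A usage sketch (not part of the patch) of the iterator these flags configure, assuming an existing struct gendisk *disk:

static void print_all_parts(struct gendisk *disk)
{
	struct disk_part_iter piter;
	struct hd_struct *part;

	/* visit partition 0 as well, even when it is empty */
	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
	while ((part = disk_part_iter_next(&piter)))
		printk(KERN_INFO "part %d: %llu sectors\n",
		       part->partno, (unsigned long long)part->nr_sects);
	disk_part_iter_exit(&piter);
}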
@@ -2,6 +2,8 @@
#define _LINUX_INIT_H

#include <linux/compiler.h>
#include <linux/section-names.h>
#include <linux/stringify.h>

/* These macros are used to mark some functions or
 * initialized data (doesn't apply to uninitialized data)
@@ -60,14 +62,6 @@
#define __refdata __section(.ref.data)
#define __refconst __section(.ref.rodata)

/* backward compatibility note
 * A few places hardcode the old section names:
 * .text.init.refok
 * .data.init.refok
 * .exit.text.refok
 * They should be converted to use the defines from this file
 */

/* compatibility defines */
#define __init_refok __ref
#define __initdata_refok __refdata
@@ -107,7 +101,7 @@
#define __memexitconst __section(.memexit.rodata)

/* For assembly routines */
#define __HEAD .section ".head.text","ax"
#define __HEAD .section __stringify(HEAD_TEXT_SECTION),"ax"
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
@@ -198,6 +198,8 @@ struct kernel_ipmi_msg {
				   response. When you send a
				   response message, this will
				   be returned. */
#define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */

/* Note that async events and received commands do not have a completion
   code as the first byte of the incoming data, unlike a response. */
@@ -58,6 +58,12 @@
#define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35
#define IPMI_GET_CHANNEL_INFO_CMD 0x42

/* Bit for BMC global enables. */
#define IPMI_BMC_RCV_MSG_INTR 0x01
#define IPMI_BMC_EVT_MSG_INTR 0x02
#define IPMI_BMC_EVT_MSG_BUFF 0x04
#define IPMI_BMC_SYS_LOG 0x08

#define IPMI_NETFN_STORAGE_REQUEST 0x0a
#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
#define IPMI_ADD_SEL_ENTRY_CMD 0x44
@@ -109,5 +115,7 @@
#define IPMI_CHANNEL_MEDIUM_USB1 10
#define IPMI_CHANNEL_MEDIUM_USB2 11
#define IPMI_CHANNEL_MEDIUM_SYSINTF 12
#define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60
#define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f

#endif /* __LINUX_IPMI_MSGDEFS_H */
@@ -978,7 +978,8 @@ extern void journal_destroy_revoke(journal_t *);
extern int journal_revoke (handle_t *,
			   unsigned long, struct buffer_head *);
extern int journal_cancel_revoke(handle_t *, struct journal_head *);
extern void journal_write_revoke_records(journal_t *, transaction_t *);
extern void journal_write_revoke_records(journal_t *,
			   transaction_t *, int);

/* Recovery revoke support */
extern int journal_set_revoke(journal_t *, unsigned long, tid_t);
@@ -1193,7 +1193,8 @@ extern int jbd2_journal_init_revoke_caches(void);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
extern void jbd2_journal_write_revoke_records(journal_t *,
			   transaction_t *, int);

/* Recovery revoke support */
extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
@@ -409,6 +409,8 @@ struct kvm_trace_rec {
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
#define KVM_CAP_DEVICE_DEASSIGNMENT 27
#endif
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30

#ifdef KVM_CAP_IRQ_ROUTING
@@ -75,7 +75,7 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task((mm)->owner);
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}
@@ -104,7 +104,7 @@ struct wireless_dev;
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
@@ -500,7 +500,7 @@ struct netdev_queue {
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If not this interface is not defined, the
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
@@ -100,6 +100,7 @@ enum ctattr_protoinfo_tcp {
enum ctattr_protoinfo_dccp {
	CTA_PROTOINFO_DCCP_UNSPEC,
	CTA_PROTOINFO_DCCP_STATE,
	CTA_PROTOINFO_DCCP_ROLE,
	__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
@@ -354,9 +354,6 @@ struct xt_table
	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Lock for the curtain */
	struct mutex lock;

	/* Man behind the curtain... */
	struct xt_table_info *private;

@@ -434,8 +431,74 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);

extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
				    struct xt_table_info *new);

/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed. In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 * During replace any readers that are using the old tables have to complete
 * before freeing the old table. This is handled by the write locking
 * necessary for reading the counters.
 */
struct xt_info_lock {
	spinlock_t lock;
	unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();
	lock = &__get_cpu_var(xt_info_locks);
	if (!lock->readers++)
		spin_lock(&lock->lock);
}

static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (!--lock->readers)
		spin_unlock(&lock->lock);
	local_bh_enable();
}

/*
 * The "writer" side needs to get exclusive access to the lock,
 * regardless of readers. This must be called with bottom half
 * processing (and thus also preemption) disabled.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}

/*
 * This helper is performance critical and must be inlined
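A locking-pattern sketch (not part of the patch); the rule-traversal and counter-folding steps are hypothetical placeholders for what the ip/arp/ip6 tables code does:

static void example_process_packet(void)
{
	xt_info_rdlock_bh();		/* recursion-safe per-cpu read lock */
	/* ... traverse the current rule set for one packet ... */
	xt_info_rdunlock_bh();
}

static void example_fold_counters(unsigned int cpu)
{
	/* disable bottom halves before taking the writer lock, as required above */
	local_bh_disable();
	xt_info_wrlock(cpu);
	/* ... read and fold that cpu's rule counters ... */
	xt_info_wrunlock(cpu);
	local_bh_enable();
}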
@@ -376,6 +376,7 @@
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
include/linux/percpu-defs.h (new file)
@@ -0,0 +1,84 @@
#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

/*
 * Determine the real variable name from the name visible in the
 * kernel sources.
 */
#define per_cpu_var(var) per_cpu__##var

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'section' argument. This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE! The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to access
 * that section.
 */
#define DECLARE_PER_CPU_SECTION(type, name, section) \
	extern \
	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SECTION(type, name, section) \
	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name) \
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name) \
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
	DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

/*
 * Intermodule exports for per-CPU variables.
 */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

#endif /* _LINUX_PERCPU_DEFS_H */
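A usage sketch (not part of the patch) pairing DECLARE_PER_CPU/DEFINE_PER_CPU with an access helper; the counter name is hypothetical:

DECLARE_PER_CPU(unsigned long, example_hits);	/* in a header: same section as the DEFINE */
DEFINE_PER_CPU(unsigned long, example_hits);	/* in exactly one .c file */

static void count_hit(void)
{
	get_cpu_var(example_hits)++;	/* disables preemption around the update */
	put_cpu_var(example_hits);
}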
@@ -9,50 +9,6 @@

#include <asm/percpu.h>

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section) \
	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_FIRST(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE (8 << 10)
@@ -113,6 +113,7 @@ struct pkt_ctrl_command {
#include <linux/cdrom.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/mempool.h>

/* default bio write queue congestion marks */
#define PKT_WRITE_CONGESTION_ON 10000
@@ -50,6 +50,7 @@ enum regulator_status {
 * @set_current_limit: Configure a limit for a current-limited regulator.
 * @get_current_limit: Get the configured limit for a current-limited regulator.
 *
 * @set_mode: Set the configured operating mode for the regulator.
 * @get_mode: Get the configured operating mode for the regulator.
 * @get_status: Return actual (not as-configured) status of regulator, as a
 *	REGULATOR_STATUS value (or negative errno)
@@ -193,7 +193,7 @@ struct reiserfs_journal {
	atomic_t j_wcount;	/* count of writers for current commit */
	unsigned long j_bcount;	/* batch count. allows turning X transactions into 1 */
	unsigned long j_first_unflushed_offset;	/* first unflushed transactions offset */
	unsigned long j_last_flush_trans_id;	/* last fully flushed journal timestamp */
	unsigned j_last_flush_trans_id;	/* last fully flushed journal timestamp */
	struct buffer_head *j_header_bh;

	time_t j_trans_start_time;	/* time this transaction started */
include/linux/section-names.h (new file)
@@ -0,0 +1,6 @@
#ifndef __LINUX_SECTION_NAMES_H
#define __LINUX_SECTION_NAMES_H

#define HEAD_TEXT_SECTION .head.text

#endif /* !__LINUX_SECTION_NAMES_H */
@@ -67,7 +67,7 @@ static inline void slow_work_init(struct slow_work *work,
}

/**
 * slow_work_init - Initialise a very slow work item
 * vslow_work_init - Initialise a very slow work item
 * @work: The work item to initialise
 * @ops: The operations to use to handle the slow work item
 *
@@ -245,7 +245,12 @@ struct spi_master {
	 */
	u16 dma_alignment;

	/* setup mode and clock, etc (spi driver may call many times) */
	/* Setup mode and clock, etc (spi driver may call many times).
	 *
	 * IMPORTANT: this may be called when transfers to another
	 * device are active. DO NOT UPDATE SHARED REGISTERS in ways
	 * which could break those transfers.
	 */
	int (*setup)(struct spi_device *spi);

	/* bidirectional bulk transfers
@@ -148,7 +148,7 @@ struct old_linux_dirent;
	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
	     "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
#else
#ifdef CONFIG_ALPHA
#if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS)
#define SYSCALL_ALIAS(alias, name) \
	asm ( #alias " = " #name "\n\t.globl " #alias)
#else
@@ -7,6 +7,9 @@
 * key configuration differences between boards.
 */

#ifndef __LINUX_USB_MUSB_H
#define __LINUX_USB_MUSB_H

/* The USB role is defined by the connector used on the board, so long as
 * standards are being followed. (Developer boards sometimes won't.)
 */
@@ -101,3 +104,5 @@ extern int __init tusb6010_setup_interface(
extern int tusb6010_platform_retime(unsigned is_refclk);

#endif /* OMAP2 */

#endif /* __LINUX_USB_MUSB_H */
@@ -440,13 +440,15 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT(name) \
#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= autoremove_wake_function, \
		.func		= function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
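A usage sketch (not part of the patch): DEFINE_WAIT_FUNC() lets the waiter pass its own wake function where DEFINE_WAIT() would use autoremove_wake_function(); the wait queue head, condition flag, and wake callback below are hypothetical:

static int example_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	/* wake the task but leave the entry on the queue */
	return default_wake_function(wait, mode, sync, key);
}

static void example_wait(wait_queue_head_t *wq, int *done)
{
	DEFINE_WAIT_FUNC(wait, example_wake);

	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!*done)
		schedule();
	finish_wait(wq, &wait);
}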