[IA64] run some drivers/misc/sgi-xp through scripts/Lindent
Ran patches through scripts/Lindent (part 1).

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
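For reference, scripts/Lindent is the kernel tree's wrapper around GNU indent with the kernel's preferred options, and it is run directly on the files to be reformatted. A sketch of how a run like this one would be invoked is shown below; the exact file list is an assumption inferred from the hunks in this commit (the assembly file xp_nofault.S is not something Lindent would touch):

    # from the top of the kernel tree: reformat the XP/XPC sources in place
    scripts/Lindent drivers/misc/sgi-xp/xp.h \
                    drivers/misc/sgi-xp/xp_main.c \
                    drivers/misc/sgi-xp/xpc.h \
                    drivers/misc/sgi-xp/xpc_partition.c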
@@ -6,30 +6,25 @@
* Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
*/

/*
* External Cross Partition (XP) structures and defines.
*/

#ifndef _DRIVERS_MISC_SGIXP_XP_H
#define _DRIVERS_MISC_SGIXP_XP_H

#include <linux/cache.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/sn/types.h>
#include <asm/sn/bte.h>

#ifdef USE_DBUG_ON
#define DBUG_ON(condition) BUG_ON(condition)
#else
#define DBUG_ON(condition)
#endif

/*
* Define the maximum number of logically defined partitions the system
* can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
*/
#define XP_MAX_PARTITIONS 64

/*
* Define the number of u64s required to represent all the C-brick nasids
* as a bitmap. The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)

/*
* Wrapper for bte_copy() that should it return a failure status will retry
* the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
bte_result_t ret;
u64 pdst = ia64_tpa(vdst);

/*
* Ensure that the physically mapped memory is contiguous.
*
@@ -96,7 +88,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
return ret;
}

/*
* XPC establishes channel connections between the local partition and any
* other partition that is currently up. Over these channels, kernel-level
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
#error XPC_NCHANNELS exceeds MAXIMUM allowed.
#endif

/*
* The format of an XPC message is as follows:
*
@@ -160,12 +150,10 @@ struct xpc_msg {
u64 payload; /* user defined portion of message */
};

#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
#define XPC_MSG_SIZE(_payload_size) \
L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))

/*
* Define the return values and values passed to user's callout functions.
* (It is important to add new value codes at the end just preceding
@@ -270,7 +258,6 @@ enum xpc_retval {
xpcUnknownReason /* 116: unknown reason -- must be last in list */
};

/*
* Define the callout function types used by XPC to update the user on
* connection activity and state changes (via the user function registered by
@@ -381,7 +368,6 @@ typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
int ch_number, void *key);

/*
* The following is a registration entry. There is a global array of these,
* one per channel. It is used to record the connection registration made
@@ -406,15 +392,12 @@ struct xpc_registration {
u32 idle_limit; /* limit on #of idle kthreads */
} ____cacheline_aligned;

#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)

/* the following are valid xpc_allocate() flags */
#define XPC_WAIT 0 /* wait flag */
#define XPC_NOWAIT 1 /* no wait flag */

struct xpc_interface {
void (*connect) (int);
void (*disconnect) (int);
@@ -426,20 +409,18 @@ struct xpc_interface {
enum xpc_retval (*partid_to_nasids) (partid_t, void *);
};

extern struct xpc_interface xpc_interface;

extern void xpc_set_interface(void (*)(int),
void (*)(int),
enum xpc_retval (*)(partid_t, int, u32, void **),
enum xpc_retval (*)(partid_t, int, void *),
enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
void *),
enum xpc_retval (*)(partid_t, int, void *,
xpc_notify_func, void *),
void (*)(partid_t, int, void *),
enum xpc_retval (*)(partid_t, void *));
extern void xpc_clear_interface(void);

extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
u16, u32, u32);
extern void xpc_disconnect(int);
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
return xpc_interface.partid_to_nasids(partid, nasids);
}

extern u64 xp_nofault_PIOR_target;
extern int xp_nofault_PIOR(void *);
extern int xp_error_PIOR(void);

#endif /* _DRIVERS_MISC_SGIXP_XP_H */
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/

/*
* Cross Partition (XP) base.
*
@@ -15,7 +14,6 @@
*
*/

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -24,24 +22,25 @@
#include <asm/sn/sn_sal.h>
#include "xp.h"

/*
* Target of nofault PIO read.
*/
u64 xp_nofault_PIOR_target;

/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
*/
struct xpc_registration xpc_registrations[XPC_NCHANNELS];

/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
*/
static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
static enum xpc_retval
xpc_notloaded(void)
{
return xpcNotLoaded;
}

struct xpc_interface xpc_interface = {
(void (*)(int))xpc_notloaded,
@@ -54,7 +53,6 @@ struct xpc_interface xpc_interface = {
(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
};

/*
* XPC calls this when it (the XPC module) has been loaded.
*/
@@ -77,7 +75,6 @@ xpc_set_interface(void (*connect)(int),
xpc_interface.partid_to_nasids = partid_to_nasids;
}

/*
* XPC calls this when it (the XPC module) is being unloaded.
*/
@@ -91,14 +88,14 @@ xpc_clear_interface(void)
xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
xpc_notloaded;
xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
xpc_notify_func, void *)) xpc_notloaded;
xpc_notify_func,
void *))xpc_notloaded;
xpc_interface.received = (void (*)(partid_t, int, void *))
xpc_notloaded;
xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
xpc_notloaded;
}

/*
* Register for automatic establishment of a channel connection whenever
* a partition comes up.
@@ -129,7 +126,6 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
{
struct xpc_registration *registration;

DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
DBUG_ON(payload_size == 0 || nentries == 0);
DBUG_ON(func == NULL);
@@ -162,7 +158,6 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
return xpcSuccess;
}

/*
* Remove the registration for automatic connection of the specified channel
* when a partition comes up.
@@ -181,7 +176,6 @@ xpc_disconnect(int ch_number)
{
struct xpc_registration *registration;

DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);

registration = &xpc_registrations[ch_number];
@@ -214,7 +208,6 @@ xpc_disconnect(int ch_number)
return;
}

int __init
xp_init(void)
{
@@ -222,7 +215,6 @@ xp_init(void)
u64 func_addr = *(u64 *)xp_nofault_PIOR;
u64 err_func_addr = *(u64 *)xp_error_PIOR;

if (!ia64_platform_is("sn2")) {
return -ENODEV;
}
@@ -258,8 +250,8 @@ xp_init(void)

return 0;
}
module_init(xp_init);

module_init(xp_init);

void __exit
xp_exit(void)
@@ -267,13 +259,12 @@ xp_exit(void)
u64 func_addr = *(u64 *)xp_nofault_PIOR;
u64 err_func_addr = *(u64 *)xp_error_PIOR;

/* unregister the PIO read nofault code region */
(void)sn_register_nofault_code(func_addr, err_func_addr,
err_func_addr, 1, 0);
}
module_exit(xp_exit);

module_exit(xp_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
@@ -287,4 +278,3 @@ EXPORT_SYMBOL(xpc_clear_interface);
EXPORT_SYMBOL(xpc_set_interface);
EXPORT_SYMBOL(xpc_connect);
EXPORT_SYMBOL(xpc_disconnect);
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/

/*
* The xp_nofault_PIOR function takes a pointer to a remote PIO register
* and attempts to load and consume a value from it. This function
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/

/*
* Cross Partition Communication (XPC) structures and macros.
*/
@@ -14,7 +13,6 @@
#ifndef _DRIVERS_MISC_SGIXP_XPC_H
#define _DRIVERS_MISC_SGIXP_XPC_H

#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/device.h>
@@ -29,7 +27,6 @@
#include <asm/sn/shub_mmr.h>
#include "xp.h"

/*
* XPC Version numbers consist of a major and minor number. XPC can always
* talk to versions with same major #, and never talk to versions with a
@@ -39,7 +36,6 @@
#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)

/*
* The next macros define word or bit representations for given
* C-brick nasid in either the SAL provided bit array representing
@@ -67,7 +63,6 @@
/* define the process name of the discovery thread */
#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"

/*
* the reserved page
*
@@ -138,14 +133,12 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
{
int ret;

if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
ret = stamp1->tv_nsec - stamp2->tv_nsec;
}
return ret;
}

/*
* Define the structures by which XPC variables can be exported to other
* partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -177,7 +170,6 @@ struct xpc_vars {
#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
(_version >= _XPC_VERSION(3,1))

static inline int
xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
{
@@ -208,7 +200,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
old_mask);
}

/*
* The AMOs page consists of a number of AMO variables which are divided into
* four groups, The first two groups are used to identify an IRQ's sender.
@@ -222,7 +213,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)

/*
* The following structure describes the per partition specific variables.
*
@@ -260,7 +250,6 @@ struct xpc_vars_part {
#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */

/* the reserved page sizes and offsets */

#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
@@ -271,7 +260,6 @@ struct xpc_vars_part {
#define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)

/*
* Functions registered by add_timer() or called by kernel_thread() only
* allow for a single 64-bit argument. The following macros can be used to
@@ -285,8 +273,6 @@ struct xpc_vars_part {
#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)

/*
* Define a Get/Put value pair (pointers) used with a message queue.
*/
@@ -298,8 +284,6 @@ struct xpc_gp {
#define XPC_GP_SIZE \
L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)

/*
* Define a structure that contains arguments associated with opening and
* closing a channel.
@@ -315,20 +299,15 @@ struct xpc_openclose_args {
#define XPC_OPENCLOSE_ARGS_SIZE \
L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)

/* struct xpc_msg flags */

#define XPC_M_DONE 0x01 /* msg has been received/consumed */
#define XPC_M_READY 0x02 /* msg is ready to be sent */
#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */

#define XPC_MSG_ADDRESS(_payload) \
((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))

/*
* Defines notify entry.
*
@@ -347,8 +326,6 @@ struct xpc_notify {

#define XPC_N_CALL 0x01 /* notify function provided by user */

/*
* Define the structure that manages all the stuff required by a channel. In
* particular, they are used to manage the messages sent across the channel.
@@ -496,7 +473,6 @@ struct xpc_channel {

} ____cacheline_aligned;

/* struct xpc_channel flags */

#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
@@ -526,8 +502,6 @@ struct xpc_channel {
0x00020000 /* disconnecting callout completed */
#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */

/*
* Manages channels on a partition basis. There is one of these structures
* for each partition (a partition will never utilize the structure that
@@ -557,14 +531,12 @@ struct xpc_partition {
unsigned long disengage_request_timeout; /* timeout in jiffies */
struct timer_list disengage_request_timer;

/* XPC infrastructure referencing and teardown control */

volatile u8 setup_state; /* infrastructure setup state */
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
atomic_t references; /* #of references to infrastructure */

/*
* NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
* XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
@@ -572,7 +544,6 @@ struct xpc_partition {
* 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
*/

u8 nchannels; /* #of defined channels supported */
atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
atomic_t nchannels_engaged; /* #of channels engaged with remote part */
@@ -586,7 +557,6 @@ struct xpc_partition {
u64 remote_GPs_pa; /* phys address of remote partition's local */
/* Get/Put values */

/* fields used to pass args when opening or closing a channel */

void *local_openclose_args_base; /* base address of kmalloc'd space */
@@ -596,7 +566,6 @@ struct xpc_partition {
/* args */
u64 remote_openclose_args_pa; /* phys addr of remote's args */

/* IPI sending, receiving and handling related fields */

int remote_IPI_nasid; /* nasid of where to send IPIs */
@@ -610,7 +579,6 @@ struct xpc_partition {

spinlock_t IPI_lock; /* IPI handler lock */

/* channel manager related fields */

atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
@@ -618,7 +586,6 @@ struct xpc_partition {

} ____cacheline_aligned;

/* struct xpc_partition act_state values (for XPC HB) */

#define XPC_P_INACTIVE 0x00 /* partition is not active */
@@ -627,11 +594,9 @@ struct xpc_partition {
#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */

#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
xpc_deactivate_partition(__LINE__, (_p), (_reason))

/* struct xpc_partition setup_state values */

#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
@@ -639,8 +604,6 @@ struct xpc_partition {
#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */

/*
* struct xpc_partition IPI_timer #of seconds to wait before checking for
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -648,22 +611,17 @@ struct xpc_partition {
*/
#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)

/* number of seconds to wait for other partitions to disengage */
#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90

/* interval in seconds to print 'waiting disengagement' messages */
#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10

#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))

/* found in xp_main.c */
extern struct xpc_registration xpc_registrations[];

/* found in xpc_main.c */
extern struct device *xpc_part;
extern struct device *xpc_chan;
@@ -676,7 +634,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int);

/* found in xpc_partition.c */
extern int xpc_exiting;
extern struct xpc_vars *xpc_vars;
@@ -699,7 +656,6 @@ extern void xpc_deactivate_partition(const int, struct xpc_partition *,
enum xpc_retval);
extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);

/* found in xpc_channel.c */
extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int);
@@ -719,8 +675,6 @@ extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);

static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
@@ -729,8 +683,6 @@ xpc_wakeup_channel_mgr(struct xpc_partition *part)
}
}

/*
* These next two inlines are used to keep us from tearing down a channel's
* msg queues while a thread may be referencing them.
@@ -752,12 +704,9 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
}
}

#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)

/*
* These two inlines are used to keep us from tearing down a partition's
* setup infrastructure while a thread may be referencing it.
@@ -767,7 +716,6 @@ xpc_part_deref(struct xpc_partition *part)
{
s32 refs = atomic_dec_return(&part->references);

DBUG_ON(refs < 0);
if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
wake_up(&part->teardown_wq);
@@ -779,7 +727,6 @@ xpc_part_ref(struct xpc_partition *part)
{
int setup;

atomic_inc(&part->references);
setup = (part->setup_state == XPC_P_SETUP);
if (!setup) {
@@ -788,8 +735,6 @@ xpc_part_ref(struct xpc_partition *part)
return setup;
}

/*
* The following macro is to be used for the setting of the reason and
* reason_line fields in both the struct xpc_channel and struct xpc_partition
@@ -801,8 +746,6 @@ xpc_part_ref(struct xpc_partition *part)
(_p)->reason_line = _line; \
}

/*
* This next set of inlines are used to keep track of when a partition is
* potentially engaged in accessing memory belonging to another partition.
@@ -813,8 +756,8 @@ xpc_mark_partition_engaged(struct xpc_partition *part)
{
unsigned long irq_flags;
AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));

(XPC_ENGAGED_PARTITIONS_AMO *
sizeof(AMO_t)));

local_irq_save(irq_flags);

@@ -828,7 +771,8 @@ xpc_mark_partition_engaged(struct xpc_partition *part)
* keep sending IPIs and AMOs to it until the heartbeat times out.
*/
(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target));
variable),
xp_nofault_PIOR_target));

local_irq_restore(irq_flags);
}
@@ -838,8 +782,8 @@ xpc_mark_partition_disengaged(struct xpc_partition *part)
{
unsigned long irq_flags;
AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));

(XPC_ENGAGED_PARTITIONS_AMO *
sizeof(AMO_t)));

local_irq_save(irq_flags);

@@ -853,7 +797,8 @@ xpc_mark_partition_disengaged(struct xpc_partition *part)
* keep sending IPIs and AMOs to it until the heartbeat times out.
*/
(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target));
variable),
xp_nofault_PIOR_target));

local_irq_restore(irq_flags);
}
@@ -865,7 +810,6 @@ xpc_request_partition_disengage(struct xpc_partition *part)
AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));

local_irq_save(irq_flags);

/* set bit corresponding to our partid in remote partition's AMO */
@@ -878,7 +822,8 @@ xpc_request_partition_disengage(struct xpc_partition *part)
* keep sending IPIs and AMOs to it until the heartbeat times out.
*/
(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target));
variable),
xp_nofault_PIOR_target));

local_irq_restore(irq_flags);
}
@@ -890,7 +835,6 @@ xpc_cancel_partition_disengage_request(struct xpc_partition *part)
AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));

local_irq_save(irq_flags);

/* clear bit corresponding to our partid in remote partition's AMO */
@@ -903,7 +847,8 @@ xpc_cancel_partition_disengage_request(struct xpc_partition *part)
* keep sending IPIs and AMOs to it until the heartbeat times out.
*/
(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
variable), xp_nofault_PIOR_target));
variable),
xp_nofault_PIOR_target));

local_irq_restore(irq_flags);
}
@@ -913,7 +858,6 @@ xpc_partition_engaged(u64 partid_mask)
{
AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

/* return our partition's AMO variable ANDed with partid_mask */
return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
partid_mask);
@@ -924,7 +868,6 @@ xpc_partition_disengage_requested(u64 partid_mask)
{
AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;

/* return our partition's AMO variable ANDed with partid_mask */
return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
partid_mask);
@@ -935,7 +878,6 @@ xpc_clear_partition_engaged(u64 partid_mask)
{
AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

/* clear bit(s) based on partid_mask in our partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
~partid_mask);
@@ -946,14 +888,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
{
AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;

/* clear bit(s) based on partid_mask in our partition's AMO */
FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
~partid_mask);
}

/*
* The following set of macros and inlines are used for the sending and
* receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -967,14 +906,12 @@ xpc_IPI_receive(AMO_t *amo)
return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
}

static inline enum xpc_retval
xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
{
int ret = 0;
unsigned long irq_flags;

local_irq_save(irq_flags);

FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
@@ -994,7 +931,6 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
return ((ret == 0) ? xpcSuccess : xpcPioReadError);
}

/*
* IPIs associated with SGI_XPC_ACTIVATE IRQ.
*/
@@ -1011,7 +947,6 @@ xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
AMO_t *amos = (AMO_t *)__va(amos_page_pa +
(XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));

(void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
to_phys_cpuid, SGI_XPC_ACTIVATE);
}
@@ -1027,7 +962,8 @@ static inline void
xpc_IPI_send_activated(struct xpc_partition *part)
{
xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
part->remote_act_nasid, part->remote_act_phys_cpuid);
part->remote_act_nasid,
part->remote_act_phys_cpuid);
}

static inline void
@@ -1041,10 +977,10 @@ static inline void
xpc_IPI_send_disengage(struct xpc_partition *part)
{
xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
part->remote_act_nasid, part->remote_act_phys_cpuid);
part->remote_act_nasid,
part->remote_act_phys_cpuid);
}

/*
* IPIs associated with SGI_XPC_NOTIFY IRQ.
*/
@@ -1063,13 +999,11 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
struct xpc_partition *part = &xpc_partitions[ch->partid];
enum xpc_retval ret;

if (likely(part->act_state != XPC_P_DEACTIVATING)) {
ret = xpc_IPI_send(part->remote_IPI_amo_va,
(u64)ipi_flag << (ch->number * 8),
part->remote_IPI_nasid,
part->remote_IPI_phys_cpuid,
SGI_XPC_NOTIFY);
part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
ipi_flag_string, ch->partid, ch->number, ret);
if (unlikely(ret != xpcSuccess)) {
@@ -1084,7 +1018,6 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
}
}

/*
* Make it look like the remote partition, which is associated with the
* specified channel, sent us an IPI. This faked IPI will be handled
@@ -1099,14 +1032,12 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
{
struct xpc_partition *part = &xpc_partitions[ch->partid];

FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
ipi_flag_string, ch->partid, ch->number);
}

/*
* The sending and receiving of IPIs includes the setting of an AMO variable
* to indicate the reason the IPI was sent. The 64-bit variable is divided
@@ -1121,7 +1052,6 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
#define XPC_IPI_OPENREPLY 0x08
#define XPC_IPI_MSGREQUEST 0x10

/* given an AMO variable and a channel#, get its associated IPI flags */
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
@@ -1129,13 +1059,11 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010))

static inline void
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;

args->reason = ch->reason;

XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
@@ -1152,7 +1080,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;

args->msg_size = ch->msg_size;
args->local_nentries = ch->local_nentries;

@@ -1164,7 +1091,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;

args->remote_nentries = ch->remote_nentries;
args->local_nentries = ch->local_nentries;
args->local_msgqueue_pa = __pa(ch->local_msgqueue);
@@ -1184,7 +1110,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
}

/*
* Memory for XPC's AMO variables is allocated by the MSPEC driver. These
* pages are located in the lowest granule. The lowest granule uses 4k pages
@@ -1201,13 +1126,10 @@ xpc_IPI_init(int index)
{
AMO_t *amo = xpc_vars->amos_page + index;

(void)xpc_IPI_receive(amo); /* clear AMO variable */
return amo;
}

static inline enum xpc_retval
xpc_map_bte_errors(bte_result_t error)
{
@@ -1220,22 +1142,31 @@ xpc_map_bte_errors(bte_result_t error)
return xpcBteUnmappedError;
}
switch (error) {
case BTE_SUCCESS: return xpcSuccess;
case BTEFAIL_DIR: return xpcBteDirectoryError;
case BTEFAIL_POISON: return xpcBtePoisonError;
case BTEFAIL_WERR: return xpcBteWriteError;
case BTEFAIL_ACCESS: return xpcBteAccessError;
case BTEFAIL_PWERR: return xpcBtePWriteError;
case BTEFAIL_PRERR: return xpcBtePReadError;
case BTEFAIL_TOUT: return xpcBteTimeOutError;
case BTEFAIL_XTERR: return xpcBteXtalkError;
case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
default: return xpcBteUnmappedError;
case BTE_SUCCESS:
return xpcSuccess;
case BTEFAIL_DIR:
return xpcBteDirectoryError;
case BTEFAIL_POISON:
return xpcBtePoisonError;
case BTEFAIL_WERR:
return xpcBteWriteError;
case BTEFAIL_ACCESS:
return xpcBteAccessError;
case BTEFAIL_PWERR:
return xpcBtePWriteError;
case BTEFAIL_PRERR:
return xpcBtePReadError;
case BTEFAIL_TOUT:
return xpcBteTimeOutError;
case BTEFAIL_XTERR:
return xpcBteXtalkError;
case BTEFAIL_NOTAVAIL:
return xpcBteNotAvailable;
default:
return xpcBteUnmappedError;
}
}

/*
* Check to see if there is any channel activity to/from the specified
* partition.
@@ -1246,7 +1177,6 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
u64 IPI_amo;
unsigned long irq_flags;

IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
if (IPI_amo == 0) {
return;
@@ -1262,6 +1192,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
xpc_wakeup_channel_mgr(part);
}

#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
@@ -6,7 +6,6 @@
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/

/*
* Cross Partition Communication (XPC) partition support.
*
@@ -16,7 +15,6 @@
*
*/

#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
@@ -30,11 +28,9 @@
#include <asm/sn/addrs.h>
#include "xpc.h"

/* XPC is exiting flag */
int xpc_exiting;

/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;

/* original protection values for each node */
u64 xpc_prot_vec[MAX_NUMNODES];

/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static u64 *xpc_part_nasids;
@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part;
static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
static int xp_nasid_mask_words; /* actual size in words of nasid mask */

/*
* For performance reasons, each entry of xpc_partitions[] is cacheline
* aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -66,7 +59,6 @@ static int xp_nasid_mask_words; /* actual size in words of nasid mask */
*/
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];

/*
* Generic buffer used to store a local copy of portions of a remote
* partition's reserved page (either its header and part_nasids mask,
@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
char *xpc_remote_copy_buffer;
void *xpc_remote_copy_buffer_base;

/*
* Guarantee that the kmalloc'd memory is cacheline aligned.
*/
@@ -100,7 +91,6 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
return (void *)L1_CACHE_ALIGN((u64)*base);
}

/*
* Given a nasid, get the physical address of the partition's reserved page
* for that nasid. This function returns 0 on any error.
@@ -117,7 +107,6 @@ xpc_get_rsvd_page_pa(int nasid)
u64 buf_len = 0;
void *buf_base = NULL;

while (1) {

status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
@@ -135,7 +124,8 @@ xpc_get_rsvd_page_pa(int nasid)
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
GFP_KERNEL, &buf_base);
GFP_KERNEL,
&buf_base);
if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", buf_len);
@@ -162,7 +152,6 @@ xpc_get_rsvd_page_pa(int nasid)
return rp_pa;
}

/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
@@ -176,7 +165,6 @@ xpc_rsvd_page_init(void)
u64 rp_pa, nasid_array = 0;
int i, ret;

/* get the local reserved page's address */

preempt_disable();
@@ -235,7 +223,8 @@ xpc_rsvd_page_init(void)
*/
if (!enable_shub_wars_1_1()) {
ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
PAGE_SIZE,
SN_MEMPROT_ACCESS_CLASS_1,
&nasid_array);
if (ret != 0) {
dev_err(xpc_part, "can't change memory "
@@ -270,7 +259,6 @@ xpc_rsvd_page_init(void)
xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
xpc_vars->amos_page = amos_page; /* save for next load of XPC */

/* clear xpc_vars_part */
memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
XP_MAX_PARTITIONS);
@@ -296,7 +284,6 @@ xpc_rsvd_page_init(void)
return rp;
}

/*
* Change protections to allow IPI operations (and AMO operations on
* Shub 1.1 systems).
@@ -307,7 +294,6 @@ xpc_allow_IPI_ops(void)
int node;
int nasid;

// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

if (is_shub2()) {
@@ -348,12 +334,15 @@ xpc_allow_IPI_ops(void)
if (enable_shub_wars_1_1()) {
/* open up everything */
xpc_prot_vec[node] = (u64)HUB_L((u64 *)
GLOBAL_MMR_ADDR(nasid,
GLOBAL_MMR_ADDR
(nasid,
SH1_MD_DQLP_MMR_DIR_PRIVEC0));
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
HUB_S((u64 *)
GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
HUB_S((u64 *)
GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-1UL);
}
@@ -361,7 +350,6 @@ xpc_allow_IPI_ops(void)
}
}

/*
* Restrict protections to disallow IPI operations (and AMO operations on
* Shub 1.1 systems).
@@ -372,7 +360,6 @@ xpc_restrict_IPI_ops(void)
int node;
int nasid;

// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

if (is_shub2()) {
@@ -408,7 +395,6 @@ xpc_restrict_IPI_ops(void)
}
}

/*
* At periodic intervals, scan through all active partitions and ensure
* their heartbeat is still active. If not, the partition is deactivated.
@@ -421,7 +407,6 @@ xpc_check_remote_hb(void)
partid_t partid;
bte_result_t bres;

remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;

for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
@@ -470,7 +455,6 @@ xpc_check_remote_hb(void)
}
}

/*
* Get a copy of a portion of the remote partition's rsvd page.
*
@@ -484,7 +468,6 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
{
int bres, i;

/* get the reserved page's physical address */

*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
@@ -492,7 +475,6 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
return xpcNoRsvdPageAddr;
}

/* pull over the reserved page header and part_nasids mask */
bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
@@ -501,17 +483,14 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
return xpc_map_bte_errors(bres);
}

if (discovered_nasids != NULL) {
u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);

for (i = 0; i < xp_nasid_mask_words; i++) {
discovered_nasids[i] |= remote_part_nasids[i];
}
}

/* check that the partid is for another partition */

if (remote_rp->partid < 1 ||
@@ -523,7 +502,6 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
return xpcLocalPartid;
}

if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
return xpcBadVersion;
@@ -532,7 +510,6 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
return xpcSuccess;
}

/*
* Get a copy of the remote partition's XPC variables from the reserved page.
*
@@ -544,7 +521,6 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
int bres;

if (remote_vars_pa == 0) {
return xpcVarsNotSet;
}
@@ -564,7 +540,6 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
return xpcSuccess;
}

/*
* Update the remote partition's info.
*/
@@ -613,7 +588,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
part->remote_vars_version);
}

/*
* Prior code has determined the nasid which generated an IPI. Inspect
* that nasid to determine if its partition needs to be activated or
@@ -643,7 +617,6 @@ xpc_identify_act_IRQ_req(int nasid)
struct xpc_partition *part;
enum xpc_retval ret;

/* pull over the reserved page structure */

remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
@@ -663,7 +636,6 @@ xpc_identify_act_IRQ_req(int nasid)
partid = remote_rp->partid;
part = &xpc_partitions[partid];

/* pull over the cross partition variables */

remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
@@ -678,15 +650,13 @@ xpc_identify_act_IRQ_req(int nasid)
return;
}

part->act_IRQ_rcvd++;

dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
"%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
remote_vars->heartbeat, remote_vars->heartbeating_to_mask);

if (xpc_partition_disengaged(part) &&
part->act_state == XPC_P_INACTIVE) {
if (xpc_partition_disengaged(part) && part->act_state == XPC_P_INACTIVE) {

xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa,
@@ -722,8 +692,7 @@ xpc_identify_act_IRQ_req(int nasid)
/* see if the other side rebooted */
if (part->remote_amos_page_pa ==
remote_vars->amos_page_pa &&
xpc_hb_allowed(sn_partition_id,
remote_vars)) {
xpc_hb_allowed(sn_partition_id, remote_vars)) {
/* doesn't look that way, so ignore the IPI */
return;
}
@@ -778,8 +747,9 @@ xpc_identify_act_IRQ_req(int nasid)
partid));

xpc_update_partition_info(part, remote_rp_version,
&remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars);
&remote_rp_stamp,
remote_rp_pa, remote_vars_pa,
remote_vars);
reactivate = 1;
}
}
@@ -800,7 +770,6 @@ xpc_identify_act_IRQ_req(int nasid)
}
}

/*
* Loop through the activation AMO variables and process any bits
* which are set. Each bit indicates a nasid sending a partition
@@ -817,10 +786,8 @@ xpc_identify_act_IRQ_sender(void)
int n_IRQs_detected = 0;
AMO_t *act_amos;

act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;

/* scan through act AMO variable looking for non-zero entries */
for (word = 0; word < xp_nasid_mask_words; word++) {

@@ -837,7 +804,6 @@ xpc_identify_act_IRQ_sender(void)
dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
nasid_mask);

/*
* If this nasid has been added to the machine since
* our partition was reset, this will retain the
@@ -846,7 +812,6 @@ xpc_identify_act_IRQ_sender(void)
*/
xpc_mach_nasids[word] |= nasid_mask;

/* locate the nasid(s) which sent interrupts */

for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
@@ -862,7 +827,6 @@ xpc_identify_act_IRQ_sender(void)
return n_IRQs_detected;
}

/*
* See if the other side has responded to a partition disengage request
* from us.
@@ -873,7 +837,6 @@ xpc_partition_disengaged(struct xpc_partition *part)
partid_t partid = XPC_PARTID(part);
int disengaged;

disengaged = (xpc_partition_engaged(1UL << partid) == 0);
if (part->disengage_request_timeout) {
if (!disengaged) {
@@ -914,7 +877,6 @@ xpc_partition_disengaged(struct xpc_partition *part)
return disengaged;
}

/*
* Mark specified partition as active.
*/
@@ -924,7 +886,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
unsigned long irq_flags;
enum xpc_retval ret;

dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));

spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -940,7 +901,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
return ret;
}

/*
* Notify XPC that the partition is down.
*/
@@ -950,7 +910,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
{
unsigned long irq_flags;

spin_lock_irqsave(&part->act_lock, irq_flags);

if (part->act_state == XPC_P_INACTIVE) {
@@ -994,7 +953,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
xpc_partition_going_down(part, reason);
}

/*
* Mark specified partition as inactive.
*/
@@ -1003,7 +961,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
{
unsigned long irq_flags;

dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part));

@@ -1013,7 +970,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
part->remote_rp_pa = 0;
}

/*
* SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine
@@ -1041,7 +997,6 @@ xpc_discovery(void)
u64 *discovered_nasids;
enum xpc_retval ret;

remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xp_nasid_mask_bytes,
GFP_KERNEL, &remote_rp_base);
@@ -1050,7 +1005,6 @@ xpc_discovery(void)
}
remote_vars = (struct xpc_vars *)remote_rp;

discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
GFP_KERNEL);
if (discovered_nasids == NULL) {
@@ -1088,8 +1042,7 @@ xpc_discovery(void)
dev_dbg(xpc_part, "searching region %d\n", region);

for (nasid = (region * region_size * 2);
nasid < ((region + 1) * region_size * 2);
nasid += 2) {
nasid < ((region + 1) * region_size * 2); nasid += 2) {

if ((volatile int)xpc_exiting) {
break;
@@ -1097,7 +1050,6 @@ xpc_discovery(void)

dev_dbg(xpc_part, "checking nasid %d\n", nasid);

if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
dev_dbg(xpc_part, "PROM indicates Nasid %d is "
"part of the local partition; skipping "
@@ -1119,7 +1071,6 @@ xpc_discovery(void)
continue;
}

/* pull over the reserved page structure */

ret = xpc_get_remote_rp(nasid, discovered_nasids,
@@ -1140,7 +1091,6 @@ xpc_discovery(void)
partid = remote_rp->partid;
part = &xpc_partitions[partid];

/* pull over the cross partition variables */

ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
@@ -1171,10 +1121,10 @@ xpc_discovery(void)
* get the same page for remote_act_amos_pa after
* module reloads and system reboots.
*/
if (sn_register_xp_addr_region(
remote_vars->amos_page_pa,
PAGE_SIZE, 1) < 0) {
dev_dbg(xpc_part, "partition %d failed to "
if (sn_register_xp_addr_region
(remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
dev_dbg(xpc_part,
"partition %d failed to "
"register xp_addr region 0x%016lx\n",
partid, remote_vars->amos_page_pa);

@@ -1209,7 +1159,6 @@ xpc_discovery(void)
kfree(remote_rp_base);
}

/*
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
@@ -1221,7 +1170,6 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
u64 part_nasid_pa;
int bte_res;

part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0) {
return xpcPartitionDown;
@@ -1232,8 +1180,8 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);

bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
NULL);

return xpc_map_bte_errors(bte_res);
}