EDAC updates for 3.9

Only one: AMD F16h MCE decoding enablement from Jacob Shin.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.12 (GNU/Linux)
 
 iQIcBAABAgAGBQJRI2t7AAoJEBLB8Bhh3lVKmLAP/RbYNVVvLZZxXqB6heCkX7D3
 Avhi4GtuHqjY+79nqBoJEH9HjgzBzL6ffsJGPF3gtXrWybi6nzZkqog5QGRQFfGS
 gIoldbfP9MMF/RrMHbUF5x9Ha9EcdudDgYE3eqCq6KUfOMlUrBRECaU5YRrB+FKR
 qsuQNmF6J3uMPYwO9oGhTpA/vPwwapFaKgm+KND8q5h5JcpJrRpooKNQc1DheW+x
 nfYFpmSBW8eamVwV5DTjqKVhKeG3gB/3i6uSTpLvh4i0ZmV2vQ+drwC3aU0Eh0Q4
 wnslEpQENjODsvbZH6b0j2WTDgBGKEpALRkMUDTnnqvYrR96cV02MWUfp6cbKYCv
 +d/iPt3hEBCyDTDzfP66N+1b+Wqls4Dx8lhhqLdPhsIpY2Qu8OQ8/mjyIIs12qK5
 g9w3lsfy3shWmdsK/Ehd0IhNt1bzNV+63CINA2isC2QAp0Eqecj3jKrXor+MTPu1
 uRY3wM+8CrdeFNNhhhsMQYIb4+DFGLgMwY5mV6z8ceFJAjxvyUxjObnSMopjBKog
 9+JcyEHCADbpHsRup/zRIoGtSs+44sQ6l8l7pq6d4K4UfkG7Biq9lcxThID7rImn
 Rl6MOJVmJuM5n9JVIU4/bmhjfuTcI+gdshexDEH2HnqaXxd7ceIbnrFWFaBZEO5t
 t3WasBuIb30g1XCQmNT6
 =LbcU
 -----END PGP SIGNATURE-----

Merge tag 'edac_3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

Pull EDAC updates from Borislav Petkov:
 "Mostly AMD's side of EDAC.  It is basically a new family enablement
  stuff: AMD F16h MCE decoding enablement from Jacob Shin.  The rest is
  trivial cleanups."

* tag 'edac_3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp:
  mpc85xx_edac: Fix typo
  EDAC, MCE, AMD: Remove unneeded exports
  EDAC, MCE, AMD: Add MCE decoding support for Family 16h
  EDAC, MCE, AMD: Make MC2 decoding per-family
  amd64_edac: Remove dead code
Linus Torvalds 2013-02-20 11:28:50 -08:00
commit 55529fa576
4 changed files with 120 additions and 168 deletions
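
The bulk of the series is the "Make MC2 decoding per-family" rework listed above: MC2 bank decoding moves behind the per-family ops table so Family 16h can plug in its own handler, mce_amd_init() picks the hook once, and decode_mc2_mce() just calls it. Below is a stripped-down, standalone sketch of that dispatch pattern — the names mirror the driver, but the scaffolding, family value and printouts are made up; it is not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;
typedef uint8_t u8;

/* Per-family decoder ops: one bool (*)(u16 ec, u8 xec) hook per MC bank. */
struct amd_decoder_ops {
	bool (*mc2_mce)(u16, u8);
};

static bool k8_mc2_mce(u16 ec, u8 xec)   { printf("K8-style MC2 decode\n"); return true; }
static bool f15h_mc2_mce(u16 ec, u8 xec) { printf("F15h MC2 decode\n");     return true; }
static bool f16h_mc2_mce(u16 ec, u8 xec) { printf("F16h MC2 decode\n");     return true; }

static struct amd_decoder_ops ops;
static struct amd_decoder_ops *fam_ops = &ops;

/* mce_amd_init() equivalent: choose the hooks once, based on the CPU family. */
static void pick_ops(unsigned int family)
{
	switch (family) {
	case 0x15: fam_ops->mc2_mce = f15h_mc2_mce; break;
	case 0x16: fam_ops->mc2_mce = f16h_mc2_mce; break;
	default:   fam_ops->mc2_mce = k8_mc2_mce;   break;
	}
}

/* decode_mc2_mce() no longer special-cases families; it just calls the hook. */
static void decode_mc2_mce(u16 ec, u8 xec)
{
	printf("MC2 Error: ");
	if (!fam_ops->mc2_mce(ec, xec))
		printf("Corrupted MC2 MCE info?\n");
}

int main(void)
{
	pick_ops(0x16);		/* pretend we are on a Family 16h part */
	decode_mc2_mce(0, 0);	/* made-up error code fields */
	return 0;
}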

drivers/edac/amd64_edac.c

@@ -602,111 +602,6 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
return input_addr;
}
/*
* @input_addr is an InputAddr associated with the node represented by mci.
* Translate @input_addr to a DramAddr and return the result.
*/
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
struct amd64_pvt *pvt;
unsigned node_id, intlv_shift;
u64 bits, dram_addr;
u32 intlv_sel;
/*
* Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* shows how to translate a DramAddr to an InputAddr. Here we reverse
* this procedure. When translating from a DramAddr to an InputAddr, the
* bits used for node interleaving are discarded. Here we recover these
* bits from the IntlvSel field of the DRAM Limit register (section
* 3.4.4.2) for the node that input_addr is associated with.
*/
pvt = mci->pvt_info;
node_id = pvt->mc_node_id;
BUG_ON(node_id > 7);
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
(unsigned long)input_addr);
return input_addr;
}
bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
(input_addr & 0xfff);
intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
(unsigned long)input_addr,
(unsigned long)dram_addr, intlv_shift);
return dram_addr;
}
/*
* @dram_addr is a DramAddr that maps to the node represented by mci. Convert
* @dram_addr to a SysAddr.
*/
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 hole_base, hole_offset, hole_size, base, sys_addr;
int ret = 0;
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
if (!ret) {
if ((dram_addr >= hole_base) &&
(dram_addr < (hole_base + hole_size))) {
sys_addr = dram_addr + hole_offset;
edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
(unsigned long)dram_addr,
(unsigned long)sys_addr);
return sys_addr;
}
}
base = get_dram_base(pvt, pvt->mc_node_id);
sys_addr = dram_addr + base;
/*
* The sys_addr we have computed up to this point is a 40-bit value
* because the k8 deals with 40-bit values. However, the value we are
* supposed to return is a full 64-bit physical address. The AMD
* x86-64 architecture specifies that the most significant implemented
* address bit through bit 63 of a physical address must be either all
* 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
* 64-bit value below. See section 3.4.2 of AMD publication 24592:
* AMD x86-64 Architecture Programmer's Manual Volume 1 Application
* Programming.
*/
sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
pvt->mc_node_id, (unsigned long)dram_addr,
(unsigned long)sys_addr);
return sys_addr;
}
/*
* @input_addr is an InputAddr associated with the node given by mci. Translate
* @input_addr to a SysAddr.
*/
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
u64 input_addr)
{
return dram_addr_to_sys_addr(mci,
input_addr_to_dram_addr(mci, input_addr));
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
struct err_info *err)
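
The two translation steps spelled out in the comments of the removed code above — re-inserting the node-interleave bits into an InputAddr and sign-extending the 40-bit SysAddr — can be checked in isolation. A minimal standalone sketch follows; the addresses and interleave settings are made-up examples and plain masks stand in for the driver's GENMASK() helper, so this is illustrative only, not part of the diff.

#include <stdio.h>
#include <stdint.h>

/* Reverse of the DramAddr -> InputAddr interleaving: shift the upper bits
 * left to re-open the gap, then drop the IntlvSel value back in at bit 12. */
static uint64_t input_addr_to_dram_addr(uint64_t input_addr,
					unsigned intlv_shift, uint32_t intlv_sel)
{
	uint64_t bits = ((input_addr & 0xFFFFFF000ULL) << intlv_shift) +	/* bits 35:12 */
			(input_addr & 0xFFF);					/* page offset */

	return bits + ((uint64_t)intlv_sel << 12);
}

/* Sign-extend a 40-bit SysAddr to 64 bits, the same expression dram_addr_to_sys_addr() uses. */
static uint64_t sign_extend_40(uint64_t sys_addr)
{
	sys_addr |= ~((sys_addr & (1ULL << 39)) - 1);
	return sys_addr;
}

int main(void)
{
	/* Two nodes interleaved (one interleave bit), this node selected by IntlvSel = 1. */
	printf("DramAddr = 0x%llx\n",
	       (unsigned long long)input_addr_to_dram_addr(0x12345678ULL, 1, 1));

	/* Bit 39 clear: value unchanged.  Bit 39 set: bits 40..63 are filled with 1s. */
	printf("SysAddr  = 0x%llx\n", (unsigned long long)sign_extend_40(0x0012345678ULL));
	printf("SysAddr  = 0x%llx\n", (unsigned long long)sign_extend_40(0x8012345678ULL));
	return 0;
}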

drivers/edac/mce_amd.c

@@ -39,30 +39,28 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
*/
/* transaction type */
const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);
static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
/* cache level */
const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);
static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
/* memory transaction type */
const char * const rrrr_msgs[] = {
static const char * const rrrr_msgs[] = {
"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);
/* participating processor */
const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);
/* request timeout */
const char * const to_msgs[] = { "no timeout", "timed out" };
EXPORT_SYMBOL_GPL(to_msgs);
static const char * const to_msgs[] = { "no timeout", "timed out" };
/* memory or i/o */
const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);
static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
/* internal error type */
static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };
static const char * const f15h_mc1_mce_desc[] = {
"UC during a demand linefill from L2",
@@ -176,7 +174,7 @@ static bool k8_mc0_mce(u16 ec, u8 xec)
return f10h_mc0_mce(ec, xec);
}
static bool f14h_mc0_mce(u16 ec, u8 xec)
static bool cat_mc0_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
@@ -330,22 +328,28 @@ static bool k8_mc1_mce(u16 ec, u8 xec)
return ret;
}
static bool f14h_mc1_mce(u16 ec, u8 xec)
static bool cat_mc1_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
if (MEM_ERROR(ec)) {
if (TT(ec) != 0 || LL(ec) != 1)
ret = false;
if (!MEM_ERROR(ec))
return false;
if (TT(ec) != TT_INSTR)
return false;
if (r4 == R4_IRD)
pr_cont("Data/tag array parity error for a tag hit.\n");
else if (r4 == R4_SNOOP)
pr_cont("Tag error during snoop/victimization.\n");
else if (xec == 0x0)
pr_cont("Tag parity error from victim castout.\n");
else if (xec == 0x2)
pr_cont("Microcode patch RAM parity error.\n");
else
ret = false;
if (r4 == R4_IRD)
pr_cont("Data/tag array parity error for a tag hit.\n");
else if (r4 == R4_SNOOP)
pr_cont("Tag error during snoop/victimization.\n");
else
ret = false;
}
return ret;
}
@@ -399,12 +403,9 @@ static void decode_mc1_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
}
static void decode_mc2_mce(struct mce *m)
static bool k8_mc2_mce(u16 ec, u8 xec)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC2 Error");
bool ret = true;
if (xec == 0x1)
pr_cont(" in the write data buffers.\n");
@@ -429,24 +430,18 @@ static void decode_mc2_mce(struct mce *m)
pr_cont(": %s parity/ECC error during data "
"access from L2.\n", R4_MSG(ec));
else
goto wrong_mc2_mce;
ret = false;
} else
goto wrong_mc2_mce;
ret = false;
} else
goto wrong_mc2_mce;
ret = false;
return;
wrong_mc2_mce:
pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
return ret;
}
static void decode_f15_mc2_mce(struct mce *m)
static bool f15h_mc2_mce(u16 ec, u8 xec)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC2 Error: ");
bool ret = true;
if (TLB_ERROR(ec)) {
if (xec == 0x0)
@@ -454,10 +449,10 @@ static void decode_f15_mc2_mce(struct mce *m)
else if (xec == 0x1)
pr_cont("Poison data provided for TLB fill.\n");
else
goto wrong_f15_mc2_mce;
ret = false;
} else if (BUS_ERROR(ec)) {
if (xec > 2)
goto wrong_f15_mc2_mce;
ret = false;
pr_cont("Error during attempted NB data read.\n");
} else if (MEM_ERROR(ec)) {
@@ -471,14 +466,63 @@ static void decode_f15_mc2_mce(struct mce *m)
break;
default:
goto wrong_f15_mc2_mce;
ret = false;
}
}
return;
return ret;
}
wrong_f15_mc2_mce:
pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
static bool f16h_mc2_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
if (!MEM_ERROR(ec))
return false;
switch (xec) {
case 0x04 ... 0x05:
pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O');
break;
case 0x09 ... 0x0b:
case 0x0d ... 0x0f:
pr_cont("ECC error in L2 tag (%s).\n",
((r4 == R4_GEN) ? "BankReq" :
((r4 == R4_SNOOP) ? "Prb" : "Fill")));
break;
case 0x10 ... 0x19:
case 0x1b:
pr_cont("ECC error in L2 data array (%s).\n",
(((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" :
((r4 == R4_GEN) ? "Attr" :
((r4 == R4_EVICT) ? "Vict" : "Fill"))));
break;
case 0x1c ... 0x1d:
case 0x1f:
pr_cont("Parity error in L2 attribute bits (%s).\n",
((r4 == R4_RD) ? "Hit" :
((r4 == R4_GEN) ? "Attr" : "Fill")));
break;
default:
return false;
}
return true;
}
static void decode_mc2_mce(struct mce *m)
{
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
pr_emerg(HW_ERR "MC2 Error: ");
if (!fam_ops->mc2_mce(ec, xec))
pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
static void decode_mc3_mce(struct mce *m)
@@ -547,7 +591,7 @@ static void decode_mc4_mce(struct mce *m)
return;
case 0x19:
if (boot_cpu_data.x86 == 0x15)
if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
@@ -633,6 +677,10 @@ static void decode_mc6_mce(struct mce *m)
static inline void amd_decode_err_code(u16 ec)
{
if (INT_ERROR(ec)) {
pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec));
return;
}
pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
@@ -702,10 +750,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
case 2:
if (c->x86 == 0x15)
decode_f15_mc2_mce(m);
else
decode_mc2_mce(m);
decode_mc2_mce(m);
break;
case 3:
@@ -740,7 +785,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
if (c->x86 == 0x15)
if (c->x86 == 0x15 || c->x86 == 0x16)
pr_cont("|%s|%s",
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
@@ -772,7 +817,7 @@ static int __init mce_amd_init(void)
if (c->x86_vendor != X86_VENDOR_AMD)
return 0;
if (c->x86 < 0xf || c->x86 > 0x15)
if (c->x86 < 0xf || c->x86 > 0x16)
return 0;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -783,33 +828,46 @@ static int __init mce_amd_init(void)
case 0xf:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x10:
fam_ops->mc0_mce = f10h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x11:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x12:
fam_ops->mc0_mce = f12h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x14:
nb_err_cpumask = 0x3;
fam_ops->mc0_mce = f14h_mc0_mce;
fam_ops->mc1_mce = f14h_mc1_mce;
fam_ops->mc0_mce = cat_mc0_mce;
fam_ops->mc1_mce = cat_mc1_mce;
fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = 0x1f;
fam_ops->mc0_mce = f15h_mc0_mce;
fam_ops->mc1_mce = f15h_mc1_mce;
fam_ops->mc2_mce = f15h_mc2_mce;
break;
case 0x16:
xec_mask = 0x1f;
fam_ops->mc0_mce = cat_mc0_mce;
fam_ops->mc1_mce = cat_mc1_mce;
fam_ops->mc2_mce = f16h_mc2_mce;
break;
default:

drivers/edac/mce_amd.h

@@ -14,6 +14,7 @@
#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
#define INT_ERROR(x) (((x) & 0xF4FF) == 0x0400)
#define TT(x) (((x) >> 2) & 0x3)
#define TT_MSG(x) tt_msgs[TT(x)]
@@ -25,6 +26,8 @@
#define TO_MSG(x) to_msgs[TO(x)]
#define PP(x) (((x) >> 9) & 0x3)
#define PP_MSG(x) pp_msgs[PP(x)]
#define UU(x) (((x) >> 8) & 0x3)
#define UU_MSG(x) uu_msgs[UU(x)]
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
@@ -32,6 +35,8 @@
#define MCI_STATUS_DEFERRED BIT_64(44)
#define MCI_STATUS_POISON BIT_64(43)
extern const char * const pp_msgs[];
enum tt_ids {
TT_INSTR = 0,
TT_DATA,
@@ -65,19 +70,13 @@ enum rrrr_ids {
R4_SNOOP,
};
extern const char * const tt_msgs[];
extern const char * const ll_msgs[];
extern const char * const rrrr_msgs[];
extern const char * const pp_msgs[];
extern const char * const to_msgs[];
extern const char * const ii_msgs[];
/*
* per-family decoder ops
*/
struct amd_decoder_ops {
bool (*mc0_mce)(u16, u8);
bool (*mc1_mce)(u16, u8);
bool (*mc2_mce)(u16, u8);
};
void amd_report_gart_errors(bool);
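
The new INT_ERROR()/UU() plumbing added above is a plain mask-and-shift; a quick standalone check follows. The error-code value is a made-up example, not taken from real hardware, and the snippet only mirrors the macros shown in this header.

#include <stdint.h>
#include <stdio.h>

/* Same masks as in the header hunk above. */
#define INT_ERROR(x)	(((x) & 0xF4FF) == 0x0400)
#define UU(x)		(((x) >> 8) & 0x3)

static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };

int main(void)
{
	uint16_t ec = 0x0600;	/* hypothetical internal error, UU field = 2 */

	if (INT_ERROR(ec))
		printf("internal: %s\n", uu_msgs[UU(ec)]);	/* prints "internal: HWA" */
	return 0;
}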

drivers/edac/mpc85xx_edac.c

@@ -301,7 +301,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
"%s: Unable to requiest irq %d for "
"%s: Unable to request irq %d for "
"MPC85xx PCI err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
@@ -583,7 +583,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
"%s: Unable to requiest irq %d for "
"%s: Unable to request irq %d for "
"MPC85xx L2 err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;