Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (93 commits)
  x86, tlb, UV: Do small micro-optimization for native_flush_tlb_others()
  x86-64, NUMA: Don't call numa_set_distanc() for all possible node combinations during emulation
  x86-64, NUMA: Don't assume phys node 0 is always online in numa_emulation()
  x86-64, NUMA: Clean up initmem_init()
  x86-64, NUMA: Fix numa_emulation code with node0 without RAM
  x86-64, NUMA: Revert NUMA affine page table allocation
  x86: Work around old gas bug
  x86-64, NUMA: Better explain numa_distance handling
  x86-64, NUMA: Fix distance table handling
  mm: Move early_node_map[] reverse scan helpers under HAVE_MEMBLOCK
  x86-64, NUMA: Fix size of numa_distance array
  x86: Rename e820_table_* to pgt_buf_*
  bootmem: Move __alloc_memory_core_early() to nobootmem.c
  bootmem: Move contig_page_data definition to bootmem.c/nobootmem.c
  bootmem: Separate out CONFIG_NO_BOOTMEM code into nobootmem.c
  x86-64, NUMA: Seperate out numa_alloc_distance() from numa_set_distance()
  x86-64, NUMA: Add proper function comments to global functions
  x86-64, NUMA: Move NUMA emulation into numa_emulation.c
  x86-64, NUMA: Prepare numa_emulation() for moving NUMA emulation into a separate file
  x86-64, NUMA: Do not scan two times for setup_node_bootmem()
  ...

Fix up conflicts in arch/x86/kernel/smpboot.c
@@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 
 struct _cache_attr {
 	struct attribute attr;
-	ssize_t (*show)(struct _cpuid4_info *, char *);
-	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
+	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
+			 unsigned int);
 };
 
 #ifdef CONFIG_AMD_NB
@@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 
 #define SHOW_CACHE_DISABLE(slot)					\
 static ssize_t								\
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)	\
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
+			  unsigned int cpu)				\
 {									\
 	return show_cache_disable(this_leaf, buf, slot);		\
 }
@@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 #define STORE_CACHE_DISABLE(slot)					\
 static ssize_t								\
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
-			   const char *buf, size_t count)		\
+			   const char *buf, size_t count,		\
+			   unsigned int cpu)				\
 {									\
 	return store_cache_disable(this_leaf, buf, count, slot);	\
 }
@@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
+static ssize_t
+show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+{
+	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		return -EINVAL;
+
+	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+}
+
+static ssize_t
+store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
+		unsigned int cpu)
+{
+	unsigned long val;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		return -EINVAL;
+
+	if (strict_strtoul(buf, 16, &val) < 0)
+		return -EINVAL;
+
+	if (amd_set_subcaches(cpu, val))
+		return -EINVAL;
+
+	return count;
+}
+
+static struct _cache_attr subcaches =
+	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
+
 #else	/* CONFIG_AMD_NB */
 #define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */
@@ -532,9 +568,9 @@ static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
 				   struct _cpuid4_info_regs *this_leaf)
 {
-	union _cpuid4_leaf_eax 	eax;
-	union _cpuid4_leaf_ebx 	ebx;
-	union _cpuid4_leaf_ecx 	ecx;
+	union _cpuid4_leaf_eax	eax;
+	union _cpuid4_leaf_ebx	ebx;
+	union _cpuid4_leaf_ecx	ecx;
 	unsigned		edx;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
@@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
-static ssize_t show_##file_name						\
-			(struct _cpuid4_info *this_leaf, char *buf)	\
+static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
+				unsigned int cpu)			\
 {									\
 	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
@@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
 
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+			 unsigned int cpu)
 {
 	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
 }
@@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	return n;
 }
 
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
+					  unsigned int cpu)
 {
 	return show_shared_cpu_map_func(leaf, 0, buf);
 }
 
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
+					   unsigned int cpu)
 {
 	return show_shared_cpu_map_func(leaf, 1, buf);
 }
 
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+			 unsigned int cpu)
 {
 	switch (this_leaf->eax.split.type) {
 	case CACHE_TYPE_DATA:
@@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
 	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		n += 2;
 
+	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		n += 1;
+
 	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
 	if (attrs == NULL)
 		return attrs = default_attrs;
@@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
 		attrs[n++] = &cache_disable_1.attr;
 	}
 
+	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		attrs[n++] = &subcaches.attr;
+
 	return attrs;
 }
 #endif
@@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 
 	ret = fattr->show ?
 		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-			buf) :
+			buf, this_leaf->cpu) :
 		0;
 	return ret;
 }
@@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 	ret = fattr->store ?
 		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-			buf, count) :
+			buf, count, this_leaf->cpu) :
 		0;
 	return ret;
 }
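
Note: the new show_subcaches()/store_subcaches() handlers back a sysfs attribute named "subcaches" that reads and writes the AMD L3 subcache mask via amd_get_subcaches()/amd_set_subcaches(). The following is a minimal userspace sketch of reading that attribute, not part of the patch; the exact sysfs path (cpu0/cache/index3) is an assumption about where the shared L3 appears in the cacheinfo layout, not something stated in this diff.

/*
 * Sketch: read back the subcache mask exposed by the new "subcaches"
 * attribute for CPU 0. Path assumes the L3 cache is index3 under the
 * usual cacheinfo sysfs layout.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/subcaches";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 L3 subcache mask: %s", buf);

	fclose(f);
	return 0;
}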