sparc: Call OF and MD cpu scanning explicitly from paging_init()

We need to split up the cpu present mask setup from the cpu_data
initialization, and this is a first step towards that.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 890db403d5
parent 5052f525fd
Author: David S. Miller <davem@davemloft.net>
Date:   2009-04-01 03:13:15 -07:00

6 changed files with 6 additions and 8 deletions
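
For orientation, the call sequence this patch produces in the 64-bit paging_init() path can be sketched as follows. This is an illustrative sketch assembled from the hunks below, not code taken from the patch itself: the function name example_cpu_scan() is hypothetical, and kernel-context symbols (cpumask_t, CPU_MASK_ALL, __init, tlb_type) are assumed to come from the usual arch headers.

/* Sketch only: resulting boot-time cpu scanning order on sparc64. */
void __init example_cpu_scan(void)
{
        /* Build the OF device tree, then scan OF cpu nodes as an
         * explicit, separate step (previously done from inside
         * prom_build_devicetree() itself).
         */
        prom_build_devicetree();
        of_fill_in_cpu_data();

        if (tlb_type == hypervisor) {
                sun4v_mdesc_init();

                /* MD cpu scanning no longer happens inside
                 * sun4v_mdesc_init(); the caller now passes the
                 * cpu mask explicitly.
                 */
                mdesc_fill_in_cpu_data(CPU_MASK_ALL);
        }
}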

@@ -87,6 +87,7 @@ extern int of_node_to_nid(struct device_node *dp);
 extern void prom_build_devicetree(void);
 extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
 
 /* Dummy ref counting routines - to be implemented later */
 static inline struct device_node *of_node_get(struct device_node *node)

@@ -919,7 +919,6 @@ void __init sun4v_mdesc_init(void)
 {
         struct mdesc_handle *hp;
         unsigned long len, real_len, status;
-        cpumask_t mask;
 
         (void) sun4v_mach_desc(0UL, 0UL, &len);
@@ -943,7 +942,4 @@ void __init sun4v_mdesc_init(void)
         cur_mdesc = hp;
 
         report_platform_properties();
-
-        cpus_setall(mask);
-        mdesc_fill_in_cpu_data(mask);
 }

@@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp)
 extern char *build_path_component(struct device_node *dp);
 extern void of_console_init(void);
-extern void of_fill_in_cpu_data(void);
 
 extern unsigned int prom_early_allocated;

@@ -313,6 +313,4 @@ void __init prom_build_devicetree(void)
         printk("PROM: Built device tree with %u bytes of memory.\n",
                prom_early_allocated);
-
-        of_fill_in_cpu_data();
 }

@@ -358,6 +358,7 @@ void __init paging_init(void)
         protection_map[15] = PAGE_SHARED;
         btfixup();
         prom_build_devicetree();
+        of_fill_in_cpu_data();
         device_scan();
 }

@@ -1806,9 +1806,12 @@ void __init paging_init(void)
         real_setup_per_cpu_areas();
 
         prom_build_devicetree();
+        of_fill_in_cpu_data();
 
-        if (tlb_type == hypervisor)
+        if (tlb_type == hypervisor) {
                 sun4v_mdesc_init();
+                mdesc_fill_in_cpu_data(CPU_MASK_ALL);
+        }
 
         /* Once the OF device tree and MDESC have been setup, we know
          * the list of possible cpus. Therefore we can allocate the