/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
#include <linux/thread_info.h>

#include <asm/cpumask.h>

DECLARE_PER_CPU_CACHE_HOT(int, cpu_number);

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* CPUs sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);

struct task_struct;

struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned int max_cpus);
	void (*smp_cpus_done)(unsigned int max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	void (*cleanup_dead_cpu)(unsigned int cpu);
	void (*poll_sync_state)(void);
	int (*kick_ap_alive)(unsigned int cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);
	void (*stop_this_cpu)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};
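
/*
 * Illustrative sketch, not part of this interface: a paravirt backend
 * (e.g. Xen or KVM guest support) can override individual hooks at boot
 * and leave the rest pointing at the native implementations. The xen_*
 * names below are hypothetical stand-ins for a backend's own functions:
 *
 *	smp_ops.smp_send_reschedule	= xen_smp_send_reschedule;
 *	smp_ops.send_call_func_ipi	= xen_smp_send_call_function_ipi;
 */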

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

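/*
 * Thin wrappers around the stop_other_cpus() hook: the argument selects
 * whether the caller waits for the other CPUs to actually halt (1) or
 * merely posts the stop request and returns (0).
 */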
static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static inline void __noreturn play_dead(void)
{
	smp_ops.play_dead();
	BUG();
}

static inline void arch_smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}
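
/*
 * The arch_* wrappers above are the entry points the generic SMP core
 * (kernel/smp.c) uses to send reschedule and cross-CPU function-call
 * IPIs; they forward to whichever smp_ops backend is installed.
 */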

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void smp_prepare_cpus_common(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
int native_cpu_disable(void);
void __noreturn hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);

void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);

void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);

asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
__visible void smp_call_function_single_interrupt(struct pt_regs *r);

#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup.
 */
#define raw_smp_processor_id()  this_cpu_read(cpu_number)
#define __smp_processor_id() __this_cpu_read(cpu_number)
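
/*
 * Example (illustrative only): mapping the current CPU number to its
 * APIC ID via the per-CPU tables declared above:
 *
 *	int cpu = raw_smp_processor_id();
 *	u32 apicid = cpu_physical_id(cpu);
 */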

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
{
	return per_cpu(cpu_l2c_shared_map, cpu);
}
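
/*
 * Example (illustrative only): walking the CPUs that share a last level
 * cache with @cpu using the accessor above:
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
 *		pr_info("CPU%d shares the LLC with CPU%d\n", cpu, sibling);
 */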

#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return (struct cpumask *)cpumask_of(0);
}

static inline void __noreturn mwait_play_dead(unsigned int eax_hint) { BUG(); }
#endif /* CONFIG_SMP */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

extern unsigned int smpboot_control;
extern unsigned long apic_mmio_base;

#endif /* !__ASSEMBLER__ */

/* Control bits for startup_64 */
#define STARTUP_READ_APICID	0x80000000

/* Top 8 bits are reserved for control */
#define STARTUP_PARALLEL_MASK	0xFF000000
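
/*
 * Example (illustrative only, assuming the layout described above):
 * splitting smpboot_control into its control bits and its CPU/APIC ID
 * payload:
 *
 *	u32 ctrl = smpboot_control & STARTUP_PARALLEL_MASK;
 *	u32 id   = smpboot_control & ~STARTUP_PARALLEL_MASK;
 */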

#endif /* _ASM_X86_SMP_H */