Reposting from -stable.
Kind of a large patch, but in order to make an omelette, you need to
break a few servers.
This is a diff against -CURRENT, not stable/8, as I didn't get a chance
to test it there. It is based directly on changes that peter@ made to
the Yahoo FreeBSD 7 tree.
I have compile- and boot-tested this on my local machines, but I don't
have any 64-CPU machines to test on.
Sean
Index: sys/kern/subr_smp.c
===================================================================
--- sys/kern/subr_smp.c (revision 210421)
+++ sys/kern/subr_smp.c (working copy)
@@ -181,7 +181,7 @@
id = td->td_oncpu;
if (id == NOCPU)
return;
- ipi_selected(1 << id, IPI_AST);
+ ipi_selected(cputomask(id), IPI_AST);
}
/*
@@ -318,7 +318,7 @@
CTR1(KTR_SMP, "restart_cpus(%x)", map);
/* signal other cpus to restart */
- atomic_store_rel_int(&started_cpus, map);
+ atomic_store_rel_long(&started_cpus, map);
/* wait for each to clear its bit */
while ((stopped_cpus & map) != 0)
@@ -396,11 +396,11 @@
}
CPU_FOREACH(i) {
- if (((1 << i) & map) != 0)
+ if ((cputomask(i) & map) != 0)
ncpus++;
}
if (ncpus == 0)
- panic("ncpus is 0 with map=0x%x", map);
+ panic("ncpus is 0 with map=0x%lx", map);
/* obtain rendezvous lock */
mtx_lock_spin(&smp_ipi_mtx);
@@ -416,10 +416,10 @@
atomic_store_rel_int(&smp_rv_waiters[0], 0);
/* signal other processors, which will enter the IPI with interrupts off */
- ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
+ ipi_selected(map & ~cputomask(curcpu), IPI_RENDEZVOUS);
/* Check if the current CPU is in the map */
- if ((map & (1 << curcpu)) != 0)
+ if ((map & cputomask(curcpu)) != 0)
smp_rendezvous_action();
if (teardown_func == smp_no_rendevous_barrier)
@@ -491,7 +491,7 @@
panic("Built bad topology at %p. CPU count %d != %d",
top, top->cg_count, mp_ncpus);
if (top->cg_mask != all_cpus)
- panic("Built bad topology at %p. CPU mask 0x%X != 0x%X",
+ panic("Built bad topology at %p. CPU mask 0x%lX != 0x%lX",
top, top->cg_mask, all_cpus);
return (top);
}
@@ -535,7 +535,7 @@
parent->cg_children++;
for (; parent != NULL; parent = parent->cg_parent) {
if ((parent->cg_mask & child->cg_mask) != 0)
- panic("Duplicate children in %p. mask 0x%X child 0x%X",
+ panic("Duplicate children in %p. mask 0x%lX child 0x%lX",
parent, parent->cg_mask, child->cg_mask);
parent->cg_mask |= child->cg_mask;
parent->cg_count += child->cg_count;
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c (revision 210421)
+++ sys/kern/sched_ule.c (working copy)
@@ -851,7 +851,7 @@
* IPI the target cpu to force it to reschedule with the new
* workload.
*/
- ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
+ ipi_cpu(TDQ_ID(low), IPI_PREEMPT);
}
tdq_unlock_pair(high, low);
return (moved);
@@ -974,7 +974,7 @@
return;
}
tdq->tdq_ipipending = 1;
- ipi_selected(1 << cpu, IPI_PREEMPT);
+ ipi_cpu(cpu, IPI_PREEMPT);
}
/*
@@ -2411,7 +2411,7 @@
cpu = ts->ts_cpu;
ts->ts_cpu = sched_pickcpu(td, 0);
if (cpu != PCPU_GET(cpuid))
- ipi_selected(1 << cpu, IPI_PREEMPT);
+ ipi_cpu(cpu, IPI_PREEMPT);
#endif
}
@@ -2642,11 +2642,11 @@
sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
"", indent, cg->cg_level);
- sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%x\">", indent, "",
+ sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"0x%lx\">", indent, "",
cg->cg_count, cg->cg_mask);
first = TRUE;
for (i = 0; i < MAXCPU; i++) {
- if ((cg->cg_mask & (1 << i)) != 0) {
+ if ((cg->cg_mask & cputomask(i)) != 0) {
if (!first)
sbuf_printf(sb, ", ");
else
Index: sys/kern/kern_ktr.c
===================================================================
--- sys/kern/kern_ktr.c (revision 210421)
+++ sys/kern/kern_ktr.c (working copy)
@@ -211,7 +211,7 @@
if ((ktr_mask & mask) == 0)
return;
cpu = KTR_CPU;
- if (((1 << cpu) & ktr_cpumask) == 0)
+ if ((cputomask(cpu) & ktr_cpumask) == 0)
return;
#if defined(KTR_VERBOSE) || defined(KTR_ALQ)
td = curthread;
Index: sys/kern/kern_pmc.c
===================================================================
--- sys/kern/kern_pmc.c (revision 210421)
+++ sys/kern/kern_pmc.c (working copy)
@@ -34,6 +34,7 @@
#include "opt_hwpmc_hooks.h"
#include <sys/types.h>
+#include <sys/systm.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
@@ -110,7 +111,7 @@
{
#ifdef SMP
return (pmc_cpu_is_present(cpu) &&
- (hlt_cpus_mask & (1 << cpu)) == 0);
+ (hlt_cpus_mask & cputomask(cpu)) == 0);
#else
return (1);
#endif
@@ -137,7 +138,7 @@
pmc_cpu_is_primary(int cpu)
{
#ifdef SMP
- return ((logical_cpus_mask & (1 << cpu)) == 0);
+ return ((logical_cpus_mask & cputomask(cpu)) == 0);
#else
return (1);
#endif
Index: sys/kern/subr_pcpu.c
===================================================================
--- sys/kern/subr_pcpu.c (revision 210421)
+++ sys/kern/subr_pcpu.c (working copy)
@@ -88,7 +88,7 @@
KASSERT(cpuid >= 0 && cpuid < MAXCPU,
("pcpu_init: invalid cpuid %d", cpuid));
pcpu->pc_cpuid = cpuid;
- pcpu->pc_cpumask = 1 << cpuid;
+ pcpu->pc_cpumask = cputomask(cpuid);
cpuid_to_pcpu[cpuid] = pcpu;
SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
cpu_pcpu_init(pcpu, cpuid, size);
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c (revision 210421)
+++ sys/kern/sched_4bsd.c (working copy)
@@ -1086,7 +1086,7 @@
me = PCPU_GET(cpumask);
/* Don't bother if we should be doing it ourself. */
- if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
+ if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == cputomask(cpunum)))
return (0);
dontuse = me | stopped_cpus | hlt_cpus_mask;
@@ -1108,7 +1108,7 @@
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
if (map != map3) {
- printf("map (%02X) != map3 (%02X)\n", map,
+ printf("map (%02lX) != map3 (%02lX)\n", map,
map3);
map = map3;
}
@@ -1120,7 +1120,7 @@
/* If we only allow a specific CPU, then mask off all the others. */
if (cpunum != NOCPU) {
KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
- map &= (1 << cpunum);
+ map &= cputomask(cpunum);
} else {
/* Try choose an idle die. */
if (forward_wakeup_use_htt) {
Index: sys/dev/hwpmc/hwpmc_mod.c
===================================================================
--- sys/dev/hwpmc/hwpmc_mod.c (revision 210421)
+++ sys/dev/hwpmc/hwpmc_mod.c (working copy)
@@ -1991,7 +1991,7 @@
* had already processed the interrupt). We don't
* lose the interrupt sample.
*/
- atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
+ atomic_clear_long(&pmc_cpumask, cputomask(PCPU_GET(cpuid)));
pmc_process_samples(PCPU_GET(cpuid));
break;
@@ -4083,7 +4083,7 @@
done:
/* mark CPU as needing processing */
- atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+ atomic_set_rel_long(&pmc_cpumask, cputomask(cpu));
return (error);
}
@@ -4193,7 +4193,7 @@
break;
if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
/* Need a rescan at a later time. */
- atomic_set_rel_int(&pmc_cpumask, (1 << cpu));
+ atomic_set_rel_long(&pmc_cpumask, cputomask(cpu));
break;
}
@@ -4782,7 +4782,7 @@
PMCDBG(MOD,INI,0, "%s", "cleanup");
/* switch off sampling */
- atomic_store_rel_int(&pmc_cpumask, 0);
+ atomic_store_rel_long(&pmc_cpumask, 0);
pmc_intr = NULL;
sx_xlock(&pmc_sx);
Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c (revision 210421)
+++ sys/geom/eli/g_eli.c (working copy)
@@ -499,7 +499,7 @@
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
- return ((hlt_cpus_mask & (1 << cpu)) != 0);
+ return ((hlt_cpus_mask & cputomask(cpu)) != 0);
#else
return (0);
#endif
Index: sys/i386/include/smp.h
===================================================================
--- sys/i386/include/smp.h (revision 210421)
+++ sys/i386/include/smp.h (working copy)
@@ -62,6 +62,7 @@
void init_secondary(void);
int ipi_nmi_handler(void);
void ipi_selected(cpumask_t cpus, u_int ipi);
+#define ipi_cpu(_c, _i) ipi_selected(cputomask(_c), (_i))
void ipi_all_but_self(u_int ipi);
#ifndef XEN
void ipi_bitmap_handler(struct trapframe frame);
Index: sys/i386/include/_types.h
===================================================================
--- sys/i386/include/_types.h (revision 210421)
+++ sys/i386/include/_types.h (working copy)
@@ -74,7 +74,7 @@
* Standard type definitions.
*/
typedef unsigned long __clock_t; /* clock()... */
-typedef unsigned int __cpumask_t;
+typedef unsigned long __cpumask_t;
typedef __int32_t __critical_t;
typedef long double __double_t;
typedef long double __float_t;
Index: sys/i386/i386/vm_machdep.c
===================================================================
--- sys/i386/i386/vm_machdep.c (revision 210421)
+++ sys/i386/i386/vm_machdep.c (working copy)
@@ -613,7 +613,7 @@
/* Restart CPU #0. */
/* XXX: restart_cpus(1 << 0); */
- atomic_store_rel_int(&started_cpus, (1 << 0));
+ atomic_store_rel_long(&started_cpus, cputomask(0));
cnt = 0;
while (cpu_reset_proxy_active == 0 && cnt < 10000000)
Index: sys/i386/i386/mp_machdep.c
===================================================================
--- sys/i386/i386/mp_machdep.c (revision 210421)
+++ sys/i386/i386/mp_machdep.c (working copy)
@@ -1313,7 +1313,7 @@
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, cpus);
+ atomic_set_long(&ipi_nmi_pending, cpus);
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
@@ -1376,7 +1376,7 @@
if ((ipi_nmi_pending & cpumask) == 0)
return (1);
- atomic_clear_int(&ipi_nmi_pending, cpumask);
+ atomic_clear_long(&ipi_nmi_pending, cpumask);
cpustop_handler();
return (0);
}
@@ -1394,14 +1394,14 @@
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus, cpumask);
+ atomic_set_long(&stopped_cpus, cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ atomic_clear_long(&started_cpus, cpumask);
+ atomic_clear_long(&stopped_cpus, cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
Index: sys/cddl/dev/dtrace/amd64/dtrace_subr.c
===================================================================
--- sys/cddl/dev/dtrace/amd64/dtrace_subr.c (revision 210421)
+++ sys/cddl/dev/dtrace/amd64/dtrace_subr.c (working copy)
@@ -120,14 +120,14 @@
if (cpu == DTRACE_CPUALL)
cpus = all_cpus;
else
- cpus = (cpumask_t) (1 << cpu);
+ cpus = cputomask(cpu);
/* If the current CPU is in the set, call the function directly: */
- if ((cpus & (1 << curcpu)) != 0) {
+ if ((cpus & cputomask(curcpu)) != 0) {
(*func)(arg);
/* Mask the current CPU from the set */
- cpus &= ~(1 << curcpu);
+ cpus &= ~cputomask(curcpu);
}
/* If there are any CPUs in the set, cross-call to those CPUs */
Index: sys/amd64/include/smp.h
===================================================================
--- sys/amd64/include/smp.h (revision 210421)
+++ sys/amd64/include/smp.h (working copy)
@@ -62,6 +62,7 @@
void init_secondary(void);
int ipi_nmi_handler(void);
void ipi_selected(cpumask_t cpus, u_int ipi);
+void ipi_cpu(int cpu, u_int ipi);
void ipi_all_but_self(u_int ipi);
void ipi_bitmap_handler(struct trapframe frame);
u_int mp_bootaddress(u_int);
Index: sys/amd64/include/param.h
===================================================================
--- sys/amd64/include/param.h (revision 210421)
+++ sys/amd64/include/param.h (working copy)
@@ -64,7 +64,7 @@
#endif
#if defined(SMP) || defined(KLD_MODULE)
-#define MAXCPU 32
+#define MAXCPU 64
#else
#define MAXCPU 1
#endif
Index: sys/amd64/include/_types.h
===================================================================
--- sys/amd64/include/_types.h (revision 210421)
+++ sys/amd64/include/_types.h (working copy)
@@ -61,7 +61,7 @@
* Standard type definitions.
*/
typedef __int32_t __clock_t; /* clock()... */
-typedef unsigned int __cpumask_t;
+typedef unsigned long __cpumask_t;
typedef __int64_t __critical_t;
typedef double __double_t;
typedef float __float_t;
Index: sys/amd64/amd64/vm_machdep.c
===================================================================
--- sys/amd64/amd64/vm_machdep.c (revision 210421)
+++ sys/amd64/amd64/vm_machdep.c (working copy)
@@ -543,7 +543,7 @@
printf("cpu_reset: Restarting BSP\n");
/* Restart CPU #0. */
- atomic_store_rel_int(&started_cpus, 1 << 0);
+ atomic_store_rel_long(&started_cpus, cputomask(0));
cnt = 0;
while (cpu_reset_proxy_active == 0 && cnt < 10000000)
Index: sys/amd64/amd64/mptable.c
===================================================================
--- sys/amd64/amd64/mptable.c (revision 210421)
+++ sys/amd64/amd64/mptable.c (working copy)
@@ -888,13 +888,13 @@
* already in the table, then kill the fixup.
*/
for (id = 0; id <= MAX_LAPIC_ID; id++) {
- if ((id_mask & 1 << id) == 0)
+ if ((id_mask & (1ul << id)) == 0)
continue;
/* First, make sure we are on a logical_cpus boundary. */
if (id % logical_cpus != 0)
return;
for (i = id + 1; i < id + logical_cpus; i++)
- if ((id_mask & 1 << i) != 0)
+ if ((id_mask & (1ul << i)) != 0)
return;
}
@@ -911,7 +911,7 @@
i, id);
lapic_create(i, 0);
}
- id_mask &= ~(1 << id);
+ id_mask &= ~(1ul << id);
}
}
#endif /* MPTABLE_FORCE_HTT */
Index: sys/amd64/amd64/cpu_switch.S
===================================================================
--- sys/amd64/amd64/cpu_switch.S (revision 210421)
+++ sys/amd64/amd64/cpu_switch.S (working copy)
@@ -74,7 +74,7 @@
jz 1f
/* release bit from old pm_active */
movq PCPU(CURPMAP),%rdx
- LK btrl %eax,PM_ACTIVE(%rdx) /* clear old */
+ LK btrq %rax, PM_ACTIVE(%rdx) /* clear old */
1:
movq TD_PCB(%rsi),%r8 /* newtd->td_proc */
movq PCB_CR3(%r8),%rdx
@@ -138,14 +138,14 @@
movl PCPU(CPUID), %eax
/* Release bit from old pmap->pm_active */
movq PCPU(CURPMAP),%rcx
- LK btrl %eax,PM_ACTIVE(%rcx) /* clear old */
+ LK btrq %rax, PM_ACTIVE(%rcx) /* clear old */
SETLK %rdx, TD_LOCK(%rdi) /* Release the old thread */
swact:
/* Set bit in new pmap->pm_active */
movq TD_PROC(%rsi),%rdx /* newproc */
movq P_VMSPACE(%rdx), %rdx
addq $VM_PMAP,%rdx
- LK btsl %eax,PM_ACTIVE(%rdx) /* set new */
+ LK btsq %rax,PM_ACTIVE(%rdx) /* set new */
movq %rdx,PCPU(CURPMAP)
sw1:
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c (revision 210421)
+++ sys/amd64/amd64/pmap.c (working copy)
@@ -567,7 +567,7 @@
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
kernel_pmap->pm_root = NULL;
- kernel_pmap->pm_active = -1; /* don't allow deactivation */
+ kernel_pmap->pm_active = (cpumask_t)-1; /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
/*
@@ -926,8 +926,8 @@
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
- u_int cpumask;
- u_int other_cpus;
+ cpumask_t cpumask;
+ cpumask_t other_cpus;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
@@ -947,8 +947,8 @@
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
- u_int cpumask;
- u_int other_cpus;
+ cpumask_t cpumask;
+ cpumask_t other_cpus;
vm_offset_t addr;
sched_pin();
@@ -972,8 +972,8 @@
void
pmap_invalidate_all(pmap_t pmap)
{
- u_int cpumask;
- u_int other_cpus;
+ cpumask_t cpumask;
+ cpumask_t other_cpus;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
@@ -5002,8 +5002,8 @@
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
#ifdef SMP
- atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
- atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
+ atomic_clear_long(&oldpmap->pm_active, PCPU_GET(cpumask));
+ atomic_set_long(&pmap->pm_active, PCPU_GET(cpumask));
#else
oldpmap->pm_active &= ~PCPU_GET(cpumask);
pmap->pm_active |= PCPU_GET(cpumask);
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c (revision 210421)
+++ sys/amd64/amd64/mp_machdep.c (working copy)
@@ -127,7 +127,7 @@
* Local data and functions.
*/
-static u_int logical_cpus;
+static cpumask_t logical_cpus;
static volatile cpumask_t ipi_nmi_pending;
/* used to hold the AP's until we are ready to release them */
@@ -892,7 +892,7 @@
panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
}
- all_cpus |= (1 << cpu); /* record AP in CPU map */
+ all_cpus |= cputomask(cpu); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
@@ -1050,27 +1050,16 @@
static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
- int ncpu, othercpus;
+ int cpu, ncpu, othercpus;
othercpus = mp_ncpus - 1;
- if (mask == (u_int)-1) {
- ncpu = othercpus;
- if (ncpu < 1)
+ if (mask == (cpumask_t)-1) {
+ if (othercpus < 1)
return;
} else {
mask &= ~PCPU_GET(cpumask);
if (mask == 0)
return;
- ncpu = bitcount32(mask);
- if (ncpu > othercpus) {
- /* XXX this should be a panic offence */
- printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
- ncpu, othercpus);
- ncpu = othercpus;
- }
- /* XXX should be a panic, implied by mask == 0 above */
- if (ncpu < 1)
- return;
}
if (!(read_rflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
@@ -1078,10 +1067,18 @@
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
- if (mask == (u_int)-1)
+ if (mask == (cpumask_t)-1) {
+ ncpu = othercpus;
ipi_all_but_self(vector);
- else
- ipi_selected(mask, vector);
+ } else {
+ ncpu = 0;
+ while ((cpu = ffsl(mask)) != 0) {
+ cpu--;
+ mask &= ~cputomask(cpu);
+ lapic_ipi_vectored(vector, cpu_apic_ids[cpu]);
+ ncpu++;
+ }
+ }
while (smp_tlb_wait < ncpu)
ia32_pause();
mtx_unlock_spin(&smp_ipi_mtx);
@@ -1225,12 +1222,12 @@
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, cpus);
+ atomic_set_long(&ipi_nmi_pending, cpus);
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
- while ((cpu = ffs(cpus)) != 0) {
+ while ((cpu = ffsl(cpus)) != 0) {
cpu--;
- cpus &= ~(1 << cpu);
+ cpus &= ~(cputomask(cpu));
KASSERT(cpu_apic_ids[cpu] != -1,
("IPI to non-existent CPU %d", cpu));
@@ -1251,6 +1248,41 @@
}
/*
+ * send an IPI to a specific cpu.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+ u_int bitmap = 0;
+ u_int old_pending;
+ u_int new_pending;
+
+ if (IPI_IS_BITMAPED(ipi)) {
+ bitmap = 1 << ipi;
+ ipi = IPI_BITMAP_VECTOR;
+ }
+
+ /*
+ * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+ * of help in order to understand what is the source.
+ * Set the mask of receiving CPUs for this purpose.
+ */
+ if (ipi == IPI_STOP_HARD)
+ atomic_set_long(&ipi_nmi_pending, cputomask(cpu));
+
+ CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+
+ KASSERT(cpu_apic_ids[cpu] != -1,
+ ("IPI to non-existent CPU %d", cpu));
+
+ if (bitmap) {
+ do {
+ old_pending = cpu_ipi_pending[cpu];
+ new_pending = old_pending | bitmap;
+ } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu], old_pending, new_pending));
+ }
+ lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
+}
+
+/*
* send an IPI to all CPUs EXCEPT myself
*/
void
@@ -1268,7 +1300,7 @@
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
- atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
+ atomic_set_long(&ipi_nmi_pending, PCPU_GET(other_cpus));
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@@ -1289,7 +1321,7 @@
if ((ipi_nmi_pending & cpumask) == 0)
return (1);
- atomic_clear_int(&ipi_nmi_pending, cpumask);
+ atomic_clear_long(&ipi_nmi_pending, cpumask);
cpustop_handler();
return (0);
}
@@ -1302,19 +1334,19 @@
cpustop_handler(void)
{
int cpu = PCPU_GET(cpuid);
- int cpumask = PCPU_GET(cpumask);
+ cpumask_t cpumask = PCPU_GET(cpumask);
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
- atomic_set_int(&stopped_cpus, cpumask);
+ atomic_set_long(&stopped_cpus, cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ atomic_clear_long(&started_cpus, cpumask);
+ atomic_clear_long(&stopped_cpus, cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@@ -1340,7 +1372,7 @@
if (savectx2(stopxpcbs[cpu])) {
fpugetregs(curthread, stopfpu);
wbinvd();
- atomic_set_int(&stopped_cpus, cpumask);
+ atomic_set_long(&stopped_cpus, cpumask);
} else
fpusetregs(curthread, stopfpu);
@@ -1348,8 +1380,8 @@
while (!(started_cpus & cpumask))
ia32_pause();
- atomic_clear_int(&started_cpus, cpumask);
- atomic_clear_int(&stopped_cpus, cpumask);
+ atomic_clear_long(&started_cpus, cpumask);
+ atomic_clear_long(&stopped_cpus, cpumask);
/* Restore CR3 and enable interrupts */
load_cr3(cr3);
@@ -1381,7 +1413,7 @@
int error;
mask = hlt_cpus_mask;
- error = sysctl_handle_int(oidp, &mask, 0, req);
+ error = sysctl_handle_long(oidp, &mask, 0, req);
if (error || !req->newptr)
return (error);
@@ -1395,11 +1427,11 @@
mask |= hyperthreading_cpus_mask;
if ((mask & all_cpus) == all_cpus)
- mask &= ~(1<<0);
+ mask &= ~cputomask(0);
hlt_cpus_mask = mask;
return (error);
}
-SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
+SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_LONG|CTLFLAG_RW,
- 0, 0, sysctl_hlt_cpus, "IU",
+ 0, 0, sysctl_hlt_cpus, "LU",
"Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2.");
@@ -1422,7 +1454,7 @@
hlt_cpus_mask |= hyperthreading_cpus_mask;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
- hlt_cpus_mask &= ~(1<<0);
+ hlt_cpus_mask &= ~cputomask(0);
hlt_logical_cpus = disable;
return (error);
@@ -1460,7 +1492,7 @@
hlt_logical_cpus = 0;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
- hlt_cpus_mask &= ~(1<<0);
+ hlt_cpus_mask &= ~cputomask(0);
hyperthreading_allowed = allowed;
return (error);
@@ -1506,9 +1538,9 @@
int
mp_grab_cpu_hlt(void)
{
- u_int mask = PCPU_GET(cpumask);
+ cpumask_t mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
u_int cpuid = PCPU_GET(cpuid);
#endif
int retval;
Index: sys/amd64/amd64/intr_machdep.c
===================================================================
--- sys/amd64/amd64/intr_machdep.c (revision 210421)
+++ sys/amd64/amd64/intr_machdep.c (working copy)
@@ -444,7 +444,7 @@
*/
/* The BSP is always a valid target. */
-static cpumask_t intr_cpus = (1 << 0);
+static cpumask_t intr_cpus = cputomask(0);
static int current_cpu;
/*
@@ -466,7 +466,7 @@
current_cpu++;
if (current_cpu > mp_maxid)
current_cpu = 0;
- } while (!(intr_cpus & (1 << current_cpu)));
+ } while (!(intr_cpus & cputomask(current_cpu)));
mtx_unlock_spin(&icu_lock);
return (apic_id);
}
@@ -497,7 +497,7 @@
printf("INTR: Adding local APIC %d as a target\n",
cpu_apic_ids[cpu]);
- intr_cpus |= (1 << cpu);
+ intr_cpus |= cputomask(cpu);
}
/*
Index: sys/sys/smp.h
===================================================================
--- sys/sys/smp.h (revision 210421)
+++ sys/sys/smp.h (working copy)
@@ -89,7 +89,8 @@
* time, thus permitting us to configure sparse maps of cpuid-dependent
* (per-CPU) structures.
*/
-#define CPU_ABSENT(x_cpu) ((all_cpus & (1 << (x_cpu))) == 0)
+#include <sys/systm.h>
+#define CPU_ABSENT(x_cpu) ((all_cpus & (cputomask(x_cpu))) == 0)
/*
* Macros to iterate over non-absent CPUs. CPU_FOREACH() takes an
Index: sys/sys/gmon.h
===================================================================
--- sys/sys/gmon.h (revision 210421)
+++ sys/sys/gmon.h (working copy)
@@ -197,6 +197,7 @@
#define GPROF_FROMS 2 /* struct: from location hash bucket */
#define GPROF_TOS 3 /* struct: destination/count structure */
#define GPROF_GMONPARAM 4 /* struct: profiling parameters (see above) */
+#define GPROF_FREEBUF 5 /* int: free flat profiling buffer */
#ifdef _KERNEL
Index: sys/sys/systm.h
===================================================================
--- sys/sys/systm.h (revision 210421)
+++ sys/sys/systm.h (working copy)
@@ -423,4 +423,6 @@
return (x);
}
+#define cputomask(_cpu) ((__cpumask_t)1 << (_cpu))
+
#endif /* !_SYS_SYSTM_H_ */
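With cpumask_t widened, anything that walks a mask with ffs() also has
to move to ffsl(), as the mp_machdep changes above do. For out-of-tree
consumers the conversion pattern looks roughly like this (a
hypothetical sketch to illustrate the helpers; example_ipi_active() is
not part of the patch):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/smp.h>

	/*
	 * Hypothetical example, not part of the patch: IPI every CPU
	 * in 'mask' that is present and not halted, one at a time.
	 */
	static void
	example_ipi_active(cpumask_t mask, u_int ipi)
	{
		int cpu;

		mask &= ~hlt_cpus_mask;
		while ((cpu = ffsl(mask)) != 0) {
			cpu--;
			mask &= ~cputomask(cpu);
			if (!CPU_ABSENT(cpu))
				ipi_cpu(cpu, ipi);
		}
	}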