Commit 76df73ff authored by John W. Linville

Merge branch 'from-linus' into upstream

parents 5c601d0c 86bc843a
......@@ -19,6 +19,7 @@ Contents:
- Control dependencies.
- SMP barrier pairing.
- Examples of memory barrier sequences.
- Read memory barriers vs load speculation.
(*) Explicit kernel barriers.
......@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
we may get either of:
STORE *A = X; Y = LOAD *A;
STORE *A = Y;
STORE *A = Y = X;
=========================
......@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
(4) General memory barriers.
A general memory barrier is a combination of both a read memory barrier
and a write memory barrier. It is a partial ordering over both loads and
stores.
A general memory barrier gives a guarantee that all the LOAD and STORE
operations specified before the barrier will appear to happen before all
the LOAD and STORE operations specified after the barrier with respect to
the other components of the system.
A general memory barrier is a partial ordering over both loads and stores.
General memory barriers imply both read and write memory barriers, and so
can substitute for either.
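
As a hedged illustration only (the variable names are illustrative and the
primitives are the kernel's SMP barrier macros), a general barrier can stand
in wherever one of the weaker barriers is called for:

	extern int a, b;	/* illustrative shared variables */

	void writer(void)
	{
		a = 1;
		smp_mb();	/* a general barrier; smp_wmb() would be the
				   minimum needed to order these two stores */
		b = 2;
	}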
......@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
=============== ===============
a = 1;
<write barrier>
b = 2; x = a;
b = 2; x = b;
<read barrier>
y = b;
y = a;
Or:
......@@ -563,6 +567,18 @@ Or:
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
[!] Note that the stores before the write barrier would normally be expected to
match the loads after the read barrier or data dependency barrier, and vice
versa:
CPU 1 CPU 2
=============== ===============
a = 1; }---- --->{ v = c
b = 2; } \ / { w = d
<write barrier> \ <read barrier>
c = 3; } / \ { x = a;
d = 4; }---- --->{ y = b;
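
A minimal C sketch of the pairing pictured above, assuming the kernel's
smp_wmb()/smp_rmb() primitives and illustrative shared variables a, b, c
and d:

	extern int a, b, c, d;		/* illustrative shared variables */

	void cpu1(void)
	{
		a = 1;
		b = 2;
		smp_wmb();		/* commit a and b before c and d */
		c = 3;
		d = 4;
	}

	void cpu2(void)
	{
		int v, w, x, y;

		v = c;
		w = d;
		smp_rmb();		/* pairs with CPU 1's write barrier:
					   the loads of c and d match the
					   stores after it, the loads of a
					   and b match the stores before it */
		x = a;
		y = b;
	}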
EXAMPLES OF MEMORY BARRIER SEQUENCES
------------------------------------
......@@ -600,8 +616,8 @@ STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
| | +------+
+-------+ : :
|
| Sequence in which stores committed to memory system
| by CPU 1
| Sequence in which stores are committed to the
| memory system by CPU 1
V
......@@ -683,14 +699,12 @@ then the following will occur:
| : : | |
| : : | CPU 2 |
| +-------+ | |
\ | X->9 |------>| |
\ +-------+ | |
----->| B->2 | | |
+-------+ | |
Makes sure all effects ---> ddddddddddddddddd | |
prior to the store of C +-------+ | |
are perceptible to | B->2 |------>| |
successive loads +-------+ | |
| | X->9 |------>| |
| +-------+ | |
Makes sure all effects ---> \ ddddddddddddddddd | |
prior to the store of C \ +-------+ | |
are perceptible to ----->| B->2 |------>| |
subsequent loads +-------+ | |
: : +-------+
......@@ -699,73 +713,239 @@ following sequence of events:
CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
STORE B=2
STORE C=3
<write barrier>
STORE D=4
STORE E=5
LOAD A
STORE B=2
LOAD B
LOAD C
LOAD D
LOAD E
LOAD A
Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
some effectively random order, despite the write barrier issued by CPU 1:
+-------+ : :
| | +------+
| |------>| C=3 | }
| | : +------+ }
| | : | A=1 | }
| | : +------+ }
| CPU 1 | : | B=2 | }---
| | +------+ } \
| | wwwwwwwwwwwww} \
| | +------+ } \ : : +-------+
| | : | E=5 | } \ +-------+ | |
| | : +------+ } \ { | C->3 |------>| |
| |------>| D=4 | } \ { +-------+ : | |
| | +------+ \ { | E->5 | : | |
+-------+ : : \ { +-------+ : | |
Transfer -->{ | A->1 | : | CPU 2 |
from CPU 1 { +-------+ : | |
to CPU 2 { | D->4 | : | |
{ +-------+ : | |
{ | B->2 |------>| |
+-------+ | |
: : +-------+
If, however, a read barrier were to be placed between the load of C and the
load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
perceived correctly by CPU 2.
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| | A->0 |------>| |
| +-------+ | |
| : : +-------+
\ : :
\ +-------+
---->| A->1 |
+-------+
: :
+-------+ : :
| | +------+
| |------>| C=3 | }
| | : +------+ }
| | : | A=1 | }---
| | : +------+ } \
| CPU 1 | : | B=2 | } \
| | +------+ \
| | wwwwwwwwwwwwwwww \
| | +------+ \ : : +-------+
| | : | E=5 | } \ +-------+ | |
| | : +------+ }--- \ { | C->3 |------>| |
| |------>| D=4 | } \ \ { +-------+ : | |
| | +------+ \ -->{ | B->2 | : | |
+-------+ : : \ { +-------+ : | |
\ { | A->1 | : | CPU 2 |
\ +-------+ | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of C \ { | E->5 | : | |
to be perceptible to CPU 2 -->{ +-------+ : | |
{ | D->4 |------>| |
+-------+ | |
: : +-------+
If, however, a read barrier were to be placed between the load of E and the
load of A on CPU 2:
CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
<write barrier>
STORE B=2
LOAD B
<read barrier>
LOAD A
then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
2:
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
| : : | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of B ---->| A->1 |------>| |
to be perceptible to CPU 2 +-------+ | |
: : +-------+
To illustrate this more completely, consider what could happen if the code
contained a load of A either side of the read barrier:
CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
<write barrier>
STORE B=2
LOAD B
LOAD A [first load of A]
<read barrier>
LOAD A [second load of A]
Even though the two loads of A both occur after the load of B, they may come
up with different values:
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
| : : | |
| +-------+ | |
| | A->0 |------>| 1st |
| +-------+ | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of B ---->| A->1 |------>| 2nd |
to be perceptible to CPU 2 +-------+ | |
: : +-------+
But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
before the read barrier completes anyway:
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
\ : : | |
\ +-------+ | |
---->| A->1 |------>| 1st |
+-------+ | |
rrrrrrrrrrrrrrrrr | |
+-------+ | |
| A->1 |------>| 2nd |
+-------+ | |
: : +-------+
The guarantee is that the second load will always come up with A == 1 if the
load of B came up with B == 2. No such guarantee exists for the first load of
A; that may come up with either A == 0 or A == 1.
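
The same guarantee expressed as a hedged C sketch (A and B as in the example
above; smp_rmb() stands in for the read barrier):

	extern int A, B;		/* as in the example above */

	void cpu2(void)
	{
		int b_val, a_first, a_second;

		b_val    = B;
		a_first  = A;		/* no guarantee: may be 0 or 1 even
					   if b_val == 2 */
		smp_rmb();
		a_second = A;		/* guaranteed to be 1 if b_val == 2 */
	}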
READ MEMORY BARRIERS VS LOAD SPECULATION
----------------------------------------
Many CPUs speculate with loads: that is, they see that they will need to load
an item from memory, and they find a time when they're not using the bus for
any other loads, and so do the load in advance - even though they haven't
actually got to that point in the instruction execution flow yet. This permits
the actual load instruction to potentially complete immediately because the CPU
already has the value to hand.
It may turn out that the CPU didn't actually need the value - perhaps because a
branch circumvented the load - in which case it can discard the value or just
cache it for later use.
Consider:
CPU 1 CPU 2
======================= =======================
LOAD B
DIVIDE } Divide instructions generally
DIVIDE } take a long time to perform
LOAD A
Which might appear as this:
: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
Once the divisions are complete --> : : ~-->| |
the CPU can then perform the : : | |
LOAD with immediate effect : : +-------+
Placing a read barrier or a data dependency barrier just before the second
load:
CPU 1 CPU 2
======================= =======================
LOAD B
DIVIDE
DIVIDE
<read barrier>
LOAD A
will force any value speculatively obtained to be reconsidered to an extent
dependent on the type of barrier used. If there was no change made to the
speculated memory location, then the speculated value will just be used:
: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
: : ~ | |
rrrrrrrrrrrrrrrr~ | |
: : ~ | |
: : ~-->| |
: : | |
: : +-------+
but if there was an update or an invalidation from another CPU pending, then
the speculation will be cancelled and the value reloaded:
: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
: : ~ | |
rrrrrrrrrrrrrrrrr | |
+-------+ | |
The speculation is discarded ---> --->| A->1 |------>| |
and an updated value is +-------+ | |
retrieved : : +-------+
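
In hedged C form (A and B as above, with arbitrary long-latency work standing
in for the divides), the barrier does not forbid the speculation; it only
forces the speculated value to be validated or replaced:

	extern int A, B;		/* as in the example above */

	int cpu2(void)
	{
		int b_val, a_val;

		b_val = B;
		/* ... long-latency work, during which the CPU may load A
		       speculatively ... */
		smp_rmb();		/* a speculated value of A is kept
					   only if no update or invalidation
					   is pending; otherwise it is
					   discarded and A is reloaded */
		a_val = A;
		return a_val;
	}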
========================
......@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
===============================
Some of the other functions in the linux kernel imply memory barriers, amongst
which are locking, scheduling and memory allocation functions.
which are locking and scheduling functions.
This specification is a _minimum_ guarantee; any particular architecture may
provide more substantial guarantees, but these may not be relied upon outside
......@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
barriers is that the effects of instructions outside of a critical section
may seep into the inside of the critical section.
A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
because it is possible for an access preceding the LOCK to happen after the
LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
two accesses can themselves then cross:
*A = a;
LOCK
UNLOCK
*B = b;
may occur as:
LOCK, STORE *B, STORE *A, UNLOCK
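
A hedged spinlock sketch of the same point (the lock and the pointers are
illustrative only): the LOCK/UNLOCK pair does not keep the two stores apart,
so an explicit barrier would still be needed if their order matters to other
CPUs:

	static DEFINE_SPINLOCK(lock);	/* illustrative lock */
	extern int *A, *B;		/* assumed to point at shared memory */

	void sketch(int a, int b)
	{
		*A = a;
		spin_lock(&lock);	/* the store to *A may slip forwards
					   past this... */
		spin_unlock(&lock);	/* ...and the store to *B may slip
					   backwards past this, so the stores
					   may be seen in either order; an
					   explicit smp_wmb() or smp_mb()
					   between them would be needed to
					   forbid that */
		*B = b;
	}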
Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
anything at all - especially with respect to I/O accesses - unless combined
......@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
(*) schedule() and similar imply full memory barriers.
(*) Memory allocation and release functions imply full memory barriers.
=================================
INTER-CPU LOCKING BARRIER EFFECTS
......
......@@ -568,6 +568,18 @@ L: linuxppc-dev@ozlabs.org
W: http://www.penguinppc.org/ppc64/
S: Supported
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
P: Michael Chan
M: mchan@broadcom.com
L: netdev@vger.kernel.org
S: Supported
BROADCOM TG3 GIGABIT ETHERNET DRIVER
P: Michael Chan
M: mchan@broadcom.com
L: netdev@vger.kernel.org
S: Supported
BTTV VIDEO4LINUX DRIVER
P: Mauro Carvalho Chehab
M: mchehab@infradead.org
......@@ -1877,6 +1889,11 @@ L: linux-kernel@vger.kernel.org
W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
S: Maintained
MULTIMEDIA CARD SUBSYSTEM
P: Russell King
M: rmk+mmc@arm.linux.org.uk
S: Maintained
MULTISOUND SOUND DRIVER
P: Andrew Veliath
M: andrewtv@usa.net
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
EXTRAVERSION =-rc5
NAME=Lordi Rules
EXTRAVERSION =-rc6
NAME=Crazed Snow-Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
......
......@@ -182,7 +182,6 @@ EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_on_cpu);
EXPORT_SYMBOL(_atomic_dec_and_lock);
EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */
/*
......
......@@ -94,7 +94,7 @@ common_shutdown_1(void *generic_ptr)
if (cpuid != boot_cpuid) {
flags |= 0x00040000UL; /* "remain halted" */
*pflags = flags;
clear_bit(cpuid, &cpu_present_mask);
cpu_clear(cpuid, cpu_present_map);
halt();
}
#endif
......@@ -120,8 +120,8 @@ common_shutdown_1(void *generic_ptr)
#ifdef CONFIG_SMP
/* Wait for the secondaries to halt. */
cpu_clear(boot_cpuid, cpu_possible_map);
while (cpus_weight(cpu_possible_map))
cpu_clear(boot_cpuid, cpu_present_map);
while (cpus_weight(cpu_present_map))
barrier();
#endif
......
......@@ -68,7 +68,6 @@ enum ipi_message_type {
static int smp_secondary_alive __initdata = 0;
/* Which cpus ids came online. */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
......@@ -439,7 +438,7 @@ setup_smp(void)
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
/* Assume here that "whami" == index */
cpu_set(i, cpu_present_mask);
cpu_set(i, cpu_present_map);
cpu->pal_revision = boot_cpu_palrev;
}
......@@ -450,11 +449,10 @@ setup_smp(void)
}
} else {
smp_num_probed = 1;
cpu_set(boot_cpuid, cpu_present_mask);
}
printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
smp_num_probed, cpu_possible_map.bits[0]);
printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
smp_num_probed, cpu_present_map.bits[0]);
}
/*
......@@ -473,7 +471,7 @@ smp_prepare_cpus(unsigned int max_cpus)
/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
cpu_present_mask = cpumask_of_cpu(boot_cpuid);
cpu_present_map = cpumask_of_cpu(boot_cpuid);
printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
......@@ -486,10 +484,6 @@ smp_prepare_cpus(unsigned int max_cpus)
void __devinit
smp_prepare_boot_cpu(void)
{
/*
* Mark the boot cpu (current cpu) as online
*/
cpu_set(smp_processor_id(), cpu_online_map);
}
int __devinit
......
......@@ -66,7 +66,7 @@ titan_update_irq_hw(unsigned long mask)
register int bcpu = boot_cpuid;
#ifdef CONFIG_SMP
cpumask_t cpm = cpu_present_mask;
cpumask_t cpm = cpu_present_map;
volatile unsigned long *dim0, *dim1, *dim2, *dim3;
unsigned long mask0, mask1, mask2, mask3, dummy;
......
......@@ -101,7 +101,7 @@ config DEBUG_S3C2410_UART
help
Choice for UART for kernel low-level using S3C2410 UARTS,
should be between zero and two. The port must have been
initalised by the boot-loader before use.
initialised by the boot-loader before use.
The uncompressor code port configuration is now handled
by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
......
......@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
}
}
static unsigned char ts72xx_rtc_readb(unsigned long addr)
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
}
static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
}
static struct m48t86_ops ts72xx_rtc_ops = {
.readb = ts72xx_rtc_readb,
.writeb = ts72xx_rtc_writeb,
.readbyte = ts72xx_rtc_readbyte,
.writebyte = ts72xx_rtc_writebyte,
};
static struct platform_device ts72xx_rtc_device = {
......
......@@ -127,7 +127,7 @@ static void
imx_gpio_ack_irq(unsigned int irq)
{
DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
}
static void
......
......@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
if (i == 11)
i = 22;