Commit a072738e authored by Cyrill Gorcunov, committed by Ingo Molnar

perf, x86: Implement initial P4 PMU driver

The Netburst PMU is quite different from the "architectural
performance monitoring" specification that current CPUs use.
P4 uses a tuple of ESCR+CCCR+COUNTER MSR registers to handle
performance monitoring events.

A few implementation details:

1) We need a separate x86_pmu::hw_config helper in struct
   x86_pmu, since the register bit-fields are quite different
   from the P6, Core and later CPU series.

2) For the same reason, an x86_pmu::schedule_events helper is
   introduced.

3) hw_perf_event::config consists of packed ESCR+CCCR values.
   This works because in reality both registers only use half
   of their width. Of course, before making a real write into a
   particular MSR we need to unpack the value and extend it to
   the proper size (see the sketch after this list).

4) The tuple of packed ESCR+CCCR values in hw_perf_event::config
   doesn't describe the memory address of the ESCR MSR register,
   so we need to keep a mapping between the tuples in use and the
   available ESCRs (various P4 events may use the same ESCR,
   though not simultaneously). For this sake every active event
   has a per-cpu map of hw_perf_event::idx <--> ESCR addresses.

5) Since hw_perf_event::idx is an offset into the counter/control
   register space, we need to lift X86_PMC_MAX_GENERIC; otherwise
   the kernel strips it down to 8 registers and an armed event may
   never be turned off (ie the bit in active_mask is set but the
   loop never reaches that index to check it). Thanks to
   Peter Zijlstra.
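
To make 3) and 4) concrete, here is a minimal sketch of the packing
scheme. The 32/32 bit split and the helper bodies are assumptions
mirroring how the driver uses them; the authoritative definitions
live in perf_event_p4.h (its diff is collapsed further down), and
p4_sketch_enable() is purely illustrative:

  /* sketch: each register image fits in 32 significant bits */
  static inline u64 p4_config_pack_escr(u32 escr)
  {
          return (u64)escr << 32;
  }

  static inline u64 p4_config_pack_cccr(u32 cccr)
  {
          return (u64)cccr;
  }

  static inline u32 p4_config_unpack_escr(u64 config)
  {
          return (u32)(config >> 32);
  }

  static inline u32 p4_config_unpack_cccr(u64 config)
  {
          return (u32)config;
  }

  /*
   * hypothetical helper: unpack both halves, zero-extend them to
   * 64 bit and write each to its own MSR; escr_addr comes from
   * the per-cpu idx <--> ESCR map described in 4)
   */
  static void p4_sketch_enable(struct hw_perf_event *hwc, unsigned int escr_addr)
  {
          (void)checking_wrmsrl(escr_addr, (u64)p4_config_unpack_escr(hwc->config));
          (void)checking_wrmsrl(hwc->config_base + hwc->idx,
                          (u64)p4_config_unpack_cccr(hwc->config) | P4_CCCR_ENABLE);
  }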

Restrictions:

 - No cascaded counters support (do we ever need them?)
 - No dependent events support (so PERF_COUNT_HW_INSTRUCTIONS
   doesn't work for now)
 - There are events that map to the same counters, and such events
   can't run simultaneously (they have to share the intersecting
   counters since counter 1 is broken)
 - No PERF_COUNT_HW_CACHE_ events yet

Todo:

 - Implement dependent events
 - Need proper hashing for event opcodes (the current linear search
   is fine for the debugging stage but not for real loads)
 - Some events are counted during a clock cycle -- we need to set a
   threshold for them and count every clock cycle just to get
   summary statistics (ie to behave the same way as other PMUs do)
 - Need to switch to using event_constraints
 - To support RAW events we need to encode a global list of P4 events
   into p4_templates
 - Cache events need to be added

Event support status matrix:

 Event			status
 -----------------------------
 cycles			works
 cache-references	works
 cache-misses		works
 branch-misses		works
 bus-cycles		partially (does not work on 64bit cpu with HT enabled)
 instructions		doesn't work (needs dependent event [mop tagging])
 branches		doesn't work
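
For a quick smoke test of the working rows above one could run
something like (illustrative invocation; the workload is a
placeholder):

  perf stat -e cycles,cache-references,cache-misses,branch-misses ./workload
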
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100311165439.GB5129@lenovo>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9b33fa6b
@@ -5,7 +5,7 @@
* Performance event hw details:
*/
- #define X86_PMC_MAX_GENERIC 8
+ #define X86_PMC_MAX_GENERIC 32
#define X86_PMC_MAX_FIXED 3
#define X86_PMC_IDX_GENERIC 0
......
This diff is collapsed.
@@ -190,6 +190,8 @@ struct x86_pmu {
void (*enable_all)(void);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
int (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
@@ -415,6 +417,25 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
return 0;
}
static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
{
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
*/
hwc->config = ARCH_PERFMON_EVENTSEL_INT;
/*
* Count user and OS events unless requested not to
*/
if (!attr->exclude_user)
hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
if (!attr->exclude_kernel)
hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
return 0;
}
/*
* Setup the hardware configuration for a given attr_type
*/
@@ -446,23 +467,13 @@ static int __hw_perf_event_init(struct perf_event *event)
event->destroy = hw_perf_event_destroy;
- /*
-  * Generate PMC IRQs:
-  * (keep 'enabled' bit clear for now)
-  */
- hwc->config = ARCH_PERFMON_EVENTSEL_INT;
hwc->idx = -1;
hwc->last_cpu = -1;
hwc->last_tag = ~0ULL;
- /*
-  * Count user and OS events unless requested not to.
-  */
- if (!attr->exclude_user)
- 	hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!attr->exclude_kernel)
- 	hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+ /* Processor specifics */
+ if (x86_pmu.hw_config(attr, hwc))
+ 	return -EOPNOTSUPP;
if (!hwc->sample_period) {
hwc->sample_period = x86_pmu.max_period;
@@ -517,7 +528,7 @@ static int __hw_perf_event_init(struct perf_event *event)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+ if (!attr->exclude_kernel)
return -EOPNOTSUPP;
}
@@ -931,7 +942,7 @@ static int x86_pmu_enable(struct perf_event *event)
if (n < 0)
return n;
- ret = x86_schedule_events(cpuc, n, assign);
+ ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
/*
@@ -1263,7 +1274,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
if (n0 < 0)
return n0;
- ret = x86_schedule_events(cpuc, n0, assign);
+ ret = x86_pmu.schedule_events(cpuc, n0, assign);
if (ret)
return ret;
@@ -1313,6 +1324,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
@@ -1515,7 +1527,7 @@ static int validate_group(struct perf_event *event)
fake_cpuc->n_events = n;
- ret = x86_schedule_events(fake_cpuc, n, NULL);
+ ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out_free:
kfree(fake_cpuc);
......
@@ -363,6 +363,8 @@ static __initconst struct x86_pmu amd_pmu = {
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
......
@@ -749,6 +749,8 @@ static __initconst struct x86_pmu core_pmu = {
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
@@ -786,6 +788,8 @@ static __initconst struct x86_pmu intel_pmu = {
.enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_event,
.disable = intel_pmu_disable_event,
.hw_config = x86_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
@@ -839,12 +843,13 @@ static __init int intel_pmu_init(void)
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
- /* check for P6 processor family */
- if (boot_cpu_data.x86 == 6) {
- 	return p6_pmu_init();
- } else {
- 	return -ENODEV;
- }
+ switch (boot_cpu_data.x86) {
+ case 0x6:
+ 	return p6_pmu_init();
+ case 0xf:
+ 	return p4_pmu_init();
+ }
+ return -ENODEV;
}
/*
......
/*
* Netburst Performance Events (P4, old Xeon)
*
* Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
* Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
*
* For licencing details see kernel-base/COPYING
*/
#ifdef CONFIG_CPU_SUP_INTEL
#include <asm/perf_event_p4.h>
/*
* array indices: 0,1 - HT threads, used with HT enabled cpu
*/
struct p4_event_template {
u32 opcode; /* ESCR event + CCCR selector */
u64 config; /* packed predefined bits */
int dep; /* upstream dependency event index */
unsigned int emask; /* ESCR EventMask */
unsigned int escr_msr[2]; /* ESCR MSR for this event */
unsigned int cntr[2]; /* counter index (offset) */
};
struct p4_pmu_res {
/* maps hw_perf_event::idx to a template, for the ESCR address lookup */
struct p4_event_template *tpl[ARCH_P4_MAX_CCCR];
};
static DEFINE_PER_CPU(struct p4_pmu_res, p4_pmu_config);
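/*
 * Illustrative flow: once an event lands on hw_perf_event::idx = N,
 * its template is expected to be remembered in tpl[N] on this cpu,
 * so that p4_pmu_enable_event() below can recover the ESCR MSR
 * address via tpl[N]->escr_msr[thread] -- the packed config itself
 * cannot carry that address (see point 4 of the changelog)
 */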
/*
* WARN: CCCR1 doesn't have a working enable bit so try not to
* use it if possible
*
* Also, as soon as we start to support raw events, we will need
* to append _all_ P4_EVENT_PACK'ed events here
*/
struct p4_event_template p4_templates[] = {
[0] = {
.opcode = P4_UOP_TYPE,
.config = 0,
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_UOP_TYPE, TAGLOADS) |
P4_EVENT_ATTR(P4_UOP_TYPE, TAGSTORES),
.escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
.cntr = { 16, 17 },
},
[1] = {
.opcode = P4_GLOBAL_POWER_EVENTS,
.config = 0,
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_GLOBAL_POWER_EVENTS, RUNNING),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.cntr = { 0, 2 },
},
[2] = {
.opcode = P4_INSTR_RETIRED,
.config = 0,
.dep = 0, /* needs front-end tagging */
.emask =
P4_EVENT_ATTR(P4_INSTR_RETIRED, NBOGUSNTAG) |
P4_EVENT_ATTR(P4_INSTR_RETIRED, NBOGUSTAG) |
P4_EVENT_ATTR(P4_INSTR_RETIRED, BOGUSNTAG) |
P4_EVENT_ATTR(P4_INSTR_RETIRED, BOGUSTAG),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.cntr = { 12, 14 },
},
[3] = {
.opcode = P4_BSQ_CACHE_REFERENCE,
.config = 0,
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITM),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
.cntr = { 0, 2 },
},
[4] = {
.opcode = P4_BSQ_CACHE_REFERENCE,
.config = 0,
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
.cntr = { 0, 3 },
},
[5] = {
.opcode = P4_RETIRED_BRANCH_TYPE,
.config = 0,
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CONDITIONAL) |
P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CALL) |
P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, RETURN) |
P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, INDIRECT),
.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
.cntr = { 4, 6 },
},
[6] = {
.opcode = P4_MISPRED_BRANCH_RETIRED,
.config = 0,
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_MISPRED_BRANCH_RETIRED, NBOGUS),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.cntr = { 12, 14 },
},
[7] = {
.opcode = P4_FSB_DATA_ACTIVITY,
.config = p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
.dep = -1,
.emask =
P4_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_DRV) |
P4_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_OWN),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.cntr = { 0, 2 },
},
};
static struct p4_event_template *p4_event_map[PERF_COUNT_HW_MAX] = {
/* non-halted CPU clocks */
[PERF_COUNT_HW_CPU_CYCLES] = &p4_templates[1],
/* retired instructions: dep on tagging the FSB */
[PERF_COUNT_HW_INSTRUCTIONS] = &p4_templates[2],
/* cache hits */
[PERF_COUNT_HW_CACHE_REFERENCES] = &p4_templates[3],
/* cache misses */
[PERF_COUNT_HW_CACHE_MISSES] = &p4_templates[4],
/* branch instructions retired */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = &p4_templates[5],
/* mispredicted branches retired */
[PERF_COUNT_HW_BRANCH_MISSES] = &p4_templates[6],
/* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN): */
[PERF_COUNT_HW_BUS_CYCLES] = &p4_templates[7],
};
static u64 p4_pmu_event_map(int hw_event)
{
struct p4_event_template *tpl;
u64 config;
if (hw_event >= ARRAY_SIZE(p4_event_map)) {
printk_once(KERN_ERR "PMU: Incorrect event index\n");
return 0;
}
tpl = p4_event_map[hw_event];
/*
* fill config up according to
* a predefined event template
*/
config = tpl->config;
config |= p4_config_pack_escr(P4_EVENT_UNPACK_EVENT(tpl->opcode) << P4_EVNTSEL_EVENT_SHIFT);
config |= p4_config_pack_escr(tpl->emask << P4_EVNTSEL_EVENTMASK_SHIFT);
config |= p4_config_pack_cccr(P4_EVENT_UNPACK_SELECTOR(tpl->opcode) << P4_CCCR_ESCR_SELECT_SHIFT);
/* on HT machine we need a special bit */
if (p4_ht_active() && p4_ht_thread(raw_smp_processor_id()))
config = p4_set_ht_bit(config);
return config;
}
/*
* Note that we still have 5 events (from global events SDM list)
* intersected in opcode+emask bits so we will need another
* scheme there to distinguish the templates.
*/
static inline int p4_pmu_emask_match(unsigned int dst, unsigned int src)
{
return dst & src;
}
static struct p4_event_template *p4_pmu_template_lookup(u64 config)
{
u32 opcode = p4_config_unpack_opcode(config);
unsigned int emask = p4_config_unpack_emask(config);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(p4_templates); i++) {
if (opcode == p4_templates[i].opcode &&
p4_pmu_emask_match(emask, p4_templates[i].emask))
return &p4_templates[i];
}
return NULL;
}
/*
* We don't control raw events so it's up to the caller
* to pass sane values (and we don't count the thread number
* on an HT machine, but allow HT-compatible specifics to be
* passed on)
*/
static u64 p4_pmu_raw_event(u64 hw_event)
{
return hw_event &
(p4_config_pack_escr(P4_EVNTSEL_MASK_HT) |
p4_config_pack_cccr(P4_CCCR_MASK_HT));
}
static int p4_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
{
int cpu = raw_smp_processor_id();
/*
* the reason we use the cpu this early is that if we get scheduled
* for the first time on the same cpu, we will not need to swap
* thread-specific flags in the config (and will save some cpu
* cycles)
*/
/* CCCR by default */
hwc->config = p4_config_pack_cccr(p4_default_cccr_conf(cpu));
/* Count user and OS events unless requested not to */
hwc->config |= p4_config_pack_escr(p4_default_escr_conf(cpu, attr->exclude_kernel,
attr->exclude_user));
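/*
 * Note (assumption): p4_default_escr_conf() comes from
 * perf_event_p4.h (collapsed above); judging by the swap logic
 * in p4_pmu_swap_config_ts() below it presumably sets the
 * T0/T1 USR and OS bits for the thread we are currently on
 */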
return 0;
}
static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
unsigned long dummy;
rdmsrl(hwc->config_base + hwc->idx, dummy);
if (dummy & P4_CCCR_OVF) {
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
((u64)dummy) & ~P4_CCCR_OVF);
}
}
static inline void p4_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/*
* If an event gets disabled while its counter is in overflowed
* state we need to clear P4_CCCR_OVF, otherwise the interrupt
* gets asserted again and again
*/
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF);
}
static void p4_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_events; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
p4_pmu_disable_event(event);
}
}
static void p4_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int thread = p4_ht_config_thread(hwc->config);
u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
u64 escr_base;
struct p4_event_template *tpl;
struct p4_pmu_res *c;
/*
* do some preparation work using per-cpu private fields,
* since we need to find out which ESCR to use
*/
c = &__get_cpu_var(p4_pmu_config);
tpl = c->tpl[hwc->idx];
if (!tpl) {
pr_crit("%s: Wrong index: %d\n", __func__, hwc->idx);
return;
}
escr_base = (u64)tpl->escr_msr[thread];
/*
* - we don't support cascaded counters yet
* - and counter 1 is broken (erratum)
*/
WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
WARN_ON_ONCE(hwc->idx == 1);
(void)checking_wrmsrl(escr_base, escr_conf);
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
(u64)(p4_config_unpack_cccr(hwc->config)) | P4_CCCR_ENABLE);
}
static void p4_pmu_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_events; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
p4_pmu_enable_event(event);
}
}
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
data.addr = 0;
data.raw = NULL;
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_events; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
event = cpuc->events[idx];
hwc = &event->hw;
WARN_ON_ONCE(hwc->idx != idx);
/*
* FIXME: Redundant call, actually not needed
* but just to check if we're screwed
*/
p4_pmu_clear_cccr_ovf(hwc);
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.event_bits - 1)))
continue;
/*
* event overflow
*/
handled = 1;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
p4_pmu_disable_event(event);
}
if (handled) {
/* p4 quirk: the LVTPC entry gets masked when the PMI is raised, unmask it again */
apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
inc_irq_stat(apic_perf_irqs);
}
return handled;
}
/*
* swap thread-specific fields according to the thread
* we are going to run on
*/
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
u32 escr, cccr;
/*
* either we are lucky and continue on the same cpu, or there is no HT support
*/
if (!p4_should_swap_ts(hwc->config, cpu))
return;
/*
* the event was migrated from another logical
* cpu, so we need to swap thread-specific flags
*/
escr = p4_config_unpack_escr(hwc->config);
cccr = p4_config_unpack_cccr(hwc->config);
if (p4_ht_thread(cpu)) {
cccr &= ~P4_CCCR_OVF_PMI_T0;
cccr |= P4_CCCR_OVF_PMI_T1;
if (escr & P4_EVNTSEL_T0_OS) {
escr &= ~P4_EVNTSEL_T0_OS;
escr |= P4_EVNTSEL_T1_OS;
}
if (escr & P4_EVNTSEL_T0_USR) {
escr &= ~P4_EVNTSEL_T0_USR;
escr |= P4_EVNTSEL_T1_USR;
}
hwc->config = p4_config_pack_escr(escr);
hwc->config |= p4_config_pack_cccr(cccr);
hwc->config |= P4_CONFIG_HT;
} else {
cccr &= ~P4_CCCR_OVF_PMI_T1;
cccr |= P4_CCCR_OVF_PMI_T0;
if (escr & P4_EVNTSEL_T1_OS) {
escr &= ~P4_EVNTSEL_T1_OS;
escr |= P4_EVNTSEL_T0_OS;
}
if (escr & P4_EVNTSEL_T1_USR) {
escr &= ~P4_EVNTSEL_T1_USR;
escr |= P4_EVNTSEL_T0_USR;
}
hwc->config = p4_config_pack_escr(escr);
hwc->config |= p4_config_pack_cccr(cccr);
hwc->config &= ~P4_CONFIG_HT;
}
}
/* ESCRs are not sequential in memory so we need a map */
static unsigned int p4_escr_map[ARCH_P4_TOTAL_ESCR] = {
MSR_P4_ALF_ESCR0, /* 0 */
MSR_P4_ALF_ESCR1, /* 1 */
MSR_P4_BPU_ESCR0, /* 2 */
MSR_P4_BPU_ESCR1, /* 3 */
MSR_P4_BSU_ESCR0, /* 4 */
MSR_P4_BSU_ESCR1, /* 5 */
MSR_P4_CRU_ESCR0, /* 6 */
MSR_P4_CRU_ESCR1, /* 7 */
MSR_P4_CRU_ESCR2, /* 8 */
MSR_P4_CRU_ESCR3, /* 9 */
MSR_P4_CRU_ESCR4, /* 10 */
MSR_P4_CRU_ESCR5, /* 11 */
MSR_P4_DAC_ESCR0, /* 12 */
MSR_P4_DAC_ESCR1, /* 13 */
MSR_P4_FIRM_ESCR0, /* 14 */
MSR_P4_FIRM_ESCR1, /* 15 */
MSR_P4_FLAME_ESCR0, /* 16 */
MSR_P4_FLAME_ESCR1, /* 17 */
MSR_P4_FSB_ESCR0, /* 18 */
MSR_P4_FSB_ESCR1, /* 19 */
MSR_P4_IQ_ESCR0, /* 20 */
MSR_P4_IQ_ESCR1, /* 21 */
MSR_P4_IS_ESCR0, /* 22 */