S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
- __local_ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __local_ctl_load(S390_lowcore.user_asce, 7, 7);
- __local_ctl_load(S390_lowcore.kernel_asce, 13, 13);
+ local_ctl_load(1, &S390_lowcore.kernel_asce);
+ local_ctl_load(7, &S390_lowcore.user_asce);
+ local_ctl_load(13, &S390_lowcore.kernel_asce);
init_mm.context.asce = S390_lowcore.kernel_asce;
}
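
Note the conversion pattern used throughout this patch: the old __local_ctl_load()/__local_ctl_store() macros took the value first plus a first/last control register range, whereas the new functions take the register number first and the value by reference. An illustrative before/after for CR1, same change as above:

	__local_ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* old: value, first, last */
	local_ctl_load(1, &S390_lowcore.kernel_asce);		/* new: cr, pointer to value */

Every converted call site touches a single register, so the first/last range pair collapses into one cr argument.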
{
unsigned long reg;
- __local_ctl_store(reg, cr, cr);
+ local_ctl_store(cr, &reg);
reg |= 1UL << bit;
- __local_ctl_load(reg, cr, cr);
+ local_ctl_load(cr, &reg);
}
static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
- __local_ctl_store(reg, cr, cr);
+ local_ctl_store(cr, &reg);
reg &= ~(1UL << bit);
- __local_ctl_load(reg, cr, cr);
+ local_ctl_load(cr, &reg);
}
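
For context, local_ctl_set_bit() and local_ctl_clear_bit() above are thin read-modify-write wrappers around the new single-register helpers. A minimal sketch of what those helpers presumably look like, assuming they wrap the same lctlg/stctg instructions the old range-based macros emitted (the real definitions live in the s390 ctlreg header):

	/* Sketch only: load/store exactly one control register. */
	static __always_inline void local_ctl_load(unsigned int cr, unsigned long *reg)
	{
		asm volatile(
			"	lctlg	%[cr],%[cr],%[reg]\n"
			:
			: [reg] "Q" (*reg), [cr] "i" (cr)
			: "memory");
	}

	static __always_inline void local_ctl_store(unsigned int cr, unsigned long *reg)
	{
		asm volatile(
			"	stctg	%[cr],%[cr],%[reg]\n"
			: [reg] "=Q" (*reg)
			: [cr] "i" (cr));
	}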
void system_ctlreg_lock(void);
__u32 restart_flags; /* 0x0384 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0388 */
- __u64 user_asce; /* 0x0390 */
+ unsigned long kernel_asce; /* 0x0388 */
+ unsigned long user_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
- __u64 cregs_save_area[16]; /* 0x1380 */
+ unsigned long cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */
/* Cryptography-counter designation */
__u64 ccd; /* 0x1500 */
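
The lowcore type changes presumably follow from the new helpers taking the value by reference: in-kernel __u64 is unsigned long long, so &S390_lowcore.kernel_asce would be an unsigned long long *, which does not match an unsigned long * parameter even though both types are 64-bit on s390:

	/* With the old __u64 field this call would trigger
	 * -Wincompatible-pointer-types against unsigned long *: */
	local_ctl_load(1, &S390_lowcore.kernel_asce);

The offset comments are untouched, so the lowcore layout itself is unchanged.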
S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
- __local_ctl_load(s390_invalid_asce, 7, 7);
+ local_ctl_load(7, &s390_invalid_asce);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- __local_ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
#define activate_mm activate_mm
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- __local_ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
#include <asm-generic/mmu_context.h>
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __local_ctl_store(cr2_old.val, 2, 2);
+ local_ctl_store(2, &cr2_old.val);
cr2_new = cr2_old;
cr2_new.gse = 1;
- __local_ctl_load(cr2_new.val, 2, 2);
+ local_ctl_load(2, &cr2_new.val);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __local_ctl_load(cr2_old.val, 2, 2);
+ local_ctl_load(2, &cr2_old.val);
}
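
The CR2 handling above is the save/modify/restore idiom that recurs below for CR0: snapshot the live register, load a temporarily widened setting around the critical operation, then restore the snapshot. A hypothetical scoped helper, not part of this patch, just to make the shape explicit:

	/* Hypothetical: run fn() with CR2.gse temporarily set. */
	static void with_guarded_storage(void (*fn)(void *), void *data)
	{
		union ctlreg2 old, new;

		local_ctl_store(2, &old.val);	/* snapshot live CR2 */
		new = old;
		new.gse = 1;
		local_ctl_load(2, &new.val);
		fn(data);
		local_ctl_load(2, &old.val);	/* restore original CR2 */
	}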
/*
* To create a good backchain for this CPU in the dump store_status
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
- __local_ctl_store(cr0.val, 0, 0);
+ local_ctl_store(0, &cr0.val);
cr0_new = cr0;
cr0_new.lap = 0;
- __local_ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.val);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
* values. This makes system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
- __local_ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.val);
disabled_wait();
while (1);
}
kill_task = 1;
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
- __local_ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.val);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
:
: "Q" (*(struct vx_array *)mcesa->vector_save_area)
: "1");
- __local_ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ local_ctl_load(0, &S390_lowcore.cregs_save_area[0]);
}
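
Note the asymmetry in the CR0 window above: the AFP/VX enablement (architecturally CR0 bits 45 and 46) is set in a copy of the register image saved at machine-check time rather than read from the possibly corrupted live CR0, and the final load restores straight from cregs_save_area[0], so the temporary enablement never lingers. In condensed form, under the same union ctlreg0 assumptions as above:

	union ctlreg0 cr0;

	cr0.val = S390_lowcore.cregs_save_area[0];	/* CR0 image from mcheck */
	cr0.afp = cr0.vx = 1;				/* VLM requires AFP + VX control */
	local_ctl_load(0, &cr0.val);
	/* ... VLM revalidates v0-v15 from the extended save area ... */
	local_ctl_load(0, &S390_lowcore.cregs_save_area[0]);	/* original image */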
/* Validate access registers */
asm volatile(
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
- __local_ctl_store(cr0_old.val, 0, 0);
- __local_ctl_store(cr2_old.val, 2, 2);
+ local_ctl_store(0, &cr0_old.val);
+ local_ctl_store(2, &cr2_old.val);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __local_ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.val);
if (cr2_changed)
- __local_ctl_load(cr2_new.val, 2, 2);
+ local_ctl_load(2, &cr2_new.val);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
- __local_ctl_store(cr2.val, 2, 2);
- __local_ctl_store(cr5.val, 5, 5);
- __local_ctl_store(cr15.val, 15, 15);
+ local_ctl_store(2, &cr2.val);
+ local_ctl_store(5, &cr5.val);
+ local_ctl_store(15, &cr15.val);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
- __local_ctl_load(cr2.val, 2, 2);
- __local_ctl_load(cr5.val, 5, 5);
- __local_ctl_load(cr15.val, 15, 15);
+ local_ctl_load(2, &cr2.val);
+ local_ctl_load(5, &cr5.val);
+ local_ctl_load(15, &cr15.val);
}
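
The shift amounts encode architectural alignment: the DUCT origin in CR2 and the PASTE origin in CR5 are stored without their low 6 bits (the tables must be 64-byte aligned), and the linkage-stack entry address in CR15 drops its low 3 bits (8-byte aligned). A hypothetical sanity check for what the shifts silently assume:

	/* Hypothetical guards, not in the patch: */
	WARN_ON((unsigned long)__ctl_duct & 0x3fUL);		/* 64-byte aligned */
	WARN_ON((unsigned long)__ctl_linkage_stack & 0x07UL);	/* 8-byte aligned */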
/*
{
unsigned long cr1, cr7;
- __local_ctl_store(cr1, 1, 1);
- __local_ctl_store(cr7, 7, 7);
+ local_ctl_store(1, &cr1);
+ local_ctl_store(7, &cr7);
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
- "kernel: %016llx user: %016llx\n",
+ "kernel: %016lx user: %016lx\n",
exit ? "exit" : "entry", cr1, cr7,
S390_lowcore.kernel_asce, S390_lowcore.user_asce);
}
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
- __local_ctl_load(S390_lowcore.user_asce, 7, 7);
+ local_ctl_load(7, &S390_lowcore.user_asce);
}
__tlb_flush_local();
}
/* Enable service-signal interruption, disable timer interrupts */
old_tick = local_tick_disable();
trace_hardirqs_on();
- __local_ctl_store(cr0, 0, 0);
+ local_ctl_store(0, &cr0);
cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
cr0_sync |= 1UL << (63 - 54);
- __local_ctl_load(cr0_sync, 0, 0);
+ local_ctl_load(0, &cr0_sync);
__arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
cpu_relax();
}
local_irq_disable();
- __local_ctl_load(cr0, 0, 0);
+ local_ctl_load(0, &cr0);
if (!irq_context)
_local_bh_enable();
local_tick_enable(old_tick);
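
The 1UL << (63 - 54) above reflects the architecture's big-endian bit numbering: CR0 bit 54 is the service-signal external-interruption subclass mask, so the code strips all external-interruption subclasses via CR0_IRQ_SUBCLASS_MASK and re-enables only service signal while busy-waiting on the SCLP request. A hypothetical named form of the same computation:

	/* Hypothetical: CR0 bit 54 (big-endian numbering) = service signal. */
	#define CR0_SERVICE_SIGNAL_SUBCLASS	(1UL << (63 - 54))

	cr0_sync = (cr0 & ~CR0_IRQ_SUBCLASS_MASK) | CR0_SERVICE_SIGNAL_SUBCLASS;
	local_ctl_load(0, &cr0_sync);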
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
- __local_ctl_store(cr0.val, 0, 0);
+ local_ctl_store(0, &cr0.val);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
- __local_ctl_load(cr0_new.val, 0, 0);
+ local_ctl_load(0, &cr0_new.val);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
- __local_ctl_load(cr0.val, 0, 0);
+ local_ctl_load(0, &cr0.val);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)