{
if (test_facility(8)) {
machine.has_edat1 = 1;
- __ctl_set_bit(0, 23);
+ local_ctl_set_bit(0, 23);
}
if (test_facility(78))
machine.has_edat2 = 1;
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- __ctl_load(S390_lowcore.kernel_asce, 13, 13);
+ __local_ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.kernel_asce, 13, 13);
init_mm.context.asce = S390_lowcore.kernel_asce;
}
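
For orientation: CR1 holds the primary ASCE (the kernel address space at this point in boot), CR7 the secondary ASCE used by uaccess, and CR13 the home-space ASCE. A minimal sketch, not part of the patch, of reading one back on the local CPU to check the setup:

	unsigned long asce;

	__local_ctl_store(asce, 1, 1);	/* read back this CPU's CR1 */
	BUG_ON(asce != S390_lowcore.kernel_asce);
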
#include <linux/bug.h>
-#define __ctl_load(array, low, high) do { \
+#define __local_ctl_load(array, low, high) do { \
struct addrtype { \
char _[sizeof(array)]; \
}; \
: "memory"); \
} while (0)
-#define __ctl_store(array, low, high) do { \
+#define __local_ctl_store(array, low, high) do { \
struct addrtype { \
char _[sizeof(array)]; \
}; \
: [_low] "i" (low), [_high] "i" (high)); \
} while (0)
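
The elided middle of both macros boils down to one instruction each: lctlg (load control) and stctg (store control). A sketch of the omitted asm statements, assuming the named operands visible in the tails above:

	/* __local_ctl_load(): load control registers low..high from memory */
	asm volatile(
		"	lctlg	%[_low],%[_high],%[_arr]\n"
		:
		: [_arr] "Q" (*(struct addrtype *)(&array)),
		  [_low] "i" (low), [_high] "i" (high)
		: "memory");

	/* __local_ctl_store(): store control registers low..high to memory */
	asm volatile(
		"	stctg	%[_low],%[_high],%[_arr]\n"
		: [_arr] "=Q" (*(struct addrtype *)(&array))
		: [_low] "i" (low), [_high] "i" (high));
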
-static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
- __ctl_store(reg, cr, cr);
+ __local_ctl_store(reg, cr, cr);
reg |= 1UL << bit;
- __ctl_load(reg, cr, cr);
+ __local_ctl_load(reg, cr, cr);
}
-static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
- __ctl_store(reg, cr, cr);
+ __local_ctl_store(reg, cr, cr);
reg &= ~(1UL << bit);
- __ctl_load(reg, cr, cr);
+ __local_ctl_load(reg, cr, cr);
}
-void ctlreg_lock(void);
-void ctlreg_unlock(void);
-void ctl_set_clear_bit(int cr, int bit, bool set);
+void system_ctlreg_lock(void);
+void system_ctlreg_unlock(void);
+void system_ctl_set_clear_bit(unsigned int cr, unsigned int bit, bool set);
-static inline void ctl_set_bit(int cr, int bit)
+static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
{
- ctl_set_clear_bit(cr, bit, true);
+ system_ctl_set_clear_bit(cr, bit, true);
}
-static inline void ctl_clear_bit(int cr, int bit)
+static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
- ctl_set_clear_bit(cr, bit, false);
+ system_ctl_set_clear_bit(cr, bit, false);
}
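
The renames establish a consistent convention: local_ctl_*() touches only the control registers of the CPU the code is running on, while system_ctl_*() updates every CPU plus the control register save area, so CPUs brought online later inherit the change. A usage sketch:

	local_ctl_set_bit(0, 17);	/* this CPU only, e.g. early boot or
					 * per-CPU state such as CR2.GSE */
	system_ctl_set_bit(0, 14);	/* all CPUs, including future ones */
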
union ctlreg0 {
S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
- __ctl_load(s390_invalid_asce, 7, 7);
+ __local_ctl_load(s390_invalid_asce, 7, 7);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
}
#define activate_mm activate_mm
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
}
#include <asm-generic/mmu_context.h>
* are kept in the control register save area within absolute lowcore
* at physical address zero.
*/
-static DEFINE_SPINLOCK(ctl_lock);
+static DEFINE_SPINLOCK(system_ctl_lock);
-void ctlreg_lock(void)
- __acquires(&ctl_lock)
+void system_ctlreg_lock(void)
+ __acquires(&system_ctl_lock)
{
- spin_lock(&ctl_lock);
+ spin_lock(&system_ctl_lock);
}
-void ctlreg_unlock(void)
- __releases(&ctl_lock)
+void system_ctlreg_unlock(void)
+ __releases(&system_ctl_lock)
{
- spin_unlock(&ctl_lock);
+ spin_unlock(&system_ctl_lock);
}
struct ctl_bit_parms {
struct ctl_bit_parms *pp = info;
unsigned long regs[16];
- __ctl_store(regs, 0, 15);
+ __local_ctl_store(regs, 0, 15);
regs[pp->cr] &= pp->andval;
regs[pp->cr] |= pp->orval;
- __ctl_load(regs, 0, 15);
+ __local_ctl_load(regs, 0, 15);
}
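
Note why the callback stores and reloads all 16 control registers just to flip one bit: the low/high range of __local_ctl_store()/__local_ctl_load() must be immediate operands (the "i" constraints above), so a control register number known only at run time forces the full 0-15 range. The parameter block it receives is, by all appearances, simply:

struct ctl_bit_parms {
	unsigned long orval;	/* bits to set */
	unsigned long andval;	/* mask of bits to keep */
	int cr;			/* control register to modify */
};
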
-void ctl_set_clear_bit(int cr, int bit, bool set)
+void system_ctl_set_clear_bit(unsigned int cr, unsigned int bit, bool set)
{
struct ctl_bit_parms pp = { .cr = cr, };
struct lowcore *abs_lc;
pp.orval = set ? 1UL << bit : 0;
pp.andval = set ? -1UL : ~(1UL << bit);
- ctlreg_lock();
+ system_ctlreg_lock();
abs_lc = get_abs_lowcore();
abs_lc->cregs_save_area[cr] &= pp.andval;
abs_lc->cregs_save_area[cr] |= pp.orval;
put_abs_lowcore(abs_lc);
on_each_cpu(ctl_bit_callback, &pp, 1);
- ctlreg_unlock();
+ system_ctlreg_unlock();
}
-EXPORT_SYMBOL(ctl_set_clear_bit);
+EXPORT_SYMBOL(system_ctl_set_clear_bit);
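
Putting the pieces together: system_ctl_set_clear_bit() first patches the save area in absolute lowcore, which newly onlined CPUs copy into their own lowcore, then broadcasts the change to all running CPUs, all under system_ctlreg_lock(). Callers just write, for example:

	system_ctl_set_bit(14, 28);	/* enable channel-report machine checks
					 * on every CPU, now and in the future */
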
{
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
- __ctl_set_bit(0, 23);
+ local_ctl_set_bit(0, 23);
}
if (test_facility(78))
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
- __ctl_set_bit(0, 55);
+ local_ctl_set_bit(0, 55);
}
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
- __ctl_set_bit(0, 17);
+ local_ctl_set_bit(0, 17);
}
if (test_facility(130))
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
	/* Enable signed clock comparator comparisons */
S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
- __ctl_set_bit(0, 53);
+ local_ctl_set_bit(0, 53);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
static inline void setup_low_address_protection(void)
{
- __ctl_set_bit(0, 28);
+ local_ctl_set_bit(0, 28);
}
static inline void setup_access_registers(void)
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
- __ctl_clear_bit(0, 17);
+ local_ctl_clear_bit(0, 17);
return 0;
}
early_param("novx", disable_vector_extension);
return -ENOMEM;
gs_cb->gsd = 25;
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
preempt_enable();
preempt_disable();
kfree(current->thread.gs_cb);
current->thread.gs_cb = NULL;
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, 4);
preempt_enable();
}
return 0;
if (gs_cb) {
kfree(current->thread.gs_cb);
current->thread.gs_bc_cb = NULL;
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
}
set_prefix(0);
/* Disable lowcore protection */
- __ctl_clear_bit(0, 28);
+ local_ctl_clear_bit(0, 28);
diag_amode31_ops.diag308_reset();
}
{
spin_lock(&irq_subclass_lock);
if (!irq_subclass_refcount[subclass])
- ctl_set_bit(0, subclass);
+ system_ctl_set_bit(0, subclass);
irq_subclass_refcount[subclass]++;
spin_unlock(&irq_subclass_lock);
}
spin_lock(&irq_subclass_lock);
irq_subclass_refcount[subclass]--;
if (!irq_subclass_refcount[subclass])
- ctl_clear_bit(0, subclass);
+ system_ctl_clear_bit(0, subclass);
spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_unregister);
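
The refcount lets several users share one external-interrupt subclass: the CR0 bit is set system-wide on the first registration and cleared again only on the last deregistration. A usage sketch with the enum values from asm/irq.h:

	/* two independent users of the measurement-alert subclass */
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	/* CR0 bit 5 stays set until the second unregister */
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
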
per_kprobe.end = ip;
/* Save control regs and psw mask */
- __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_store(kcb->kprobe_saved_ctl, 9, 11);
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
	/* Set PER control regs, turning on single step for the given address */
- __ctl_load(per_kprobe, 9, 11);
+ __local_ctl_load(per_kprobe, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip;
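
For reference, CR9 takes the PER event mask while CR10 and CR11 bound the monitored address range; per_kprobe presumably uses the usual struct from asm/ptrace.h, with control set to PER_EVENT_IFETCH and start/end both set to the probed address:

struct per_regs {
	unsigned long control;	/* loaded into CR9: PER event mask */
	unsigned long start;	/* loaded into CR10: range start */
	unsigned long end;	/* loaded into CR11: range end */
};
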
unsigned long ip)
{
/* Restore control regs and psw mask, set new psw address */
- __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_load(kcb->kprobe_saved_ctl, 9, 11);
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip;
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __ctl_store(cr2_old.val, 2, 2);
+ __local_ctl_store(cr2_old.val, 2, 2);
cr2_new = cr2_old;
cr2_new.gse = 1;
- __ctl_load(cr2_new.val, 2, 2);
+ __local_ctl_load(cr2_new.val, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __ctl_load(cr2_old.val, 2, 2);
+ __local_ctl_load(cr2_old.val, 2, 2);
}
/*
* To create a good backchain for this CPU in the dump store_status
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
- __ctl_store(cr0.val, 0, 0);
+ __local_ctl_store(cr0.val, 0, 0);
cr0_new = cr0;
cr0_new.lap = 0;
- __ctl_load(cr0_new.val, 0, 0);
+ __local_ctl_load(cr0_new.val, 0, 0);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
	 * values. This makes system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
- __ctl_load(cr0.val, 0, 0);
+ __local_ctl_load(cr0.val, 0, 0);
disabled_wait();
while (1);
}
static int mchchk_wng_posted = 0;
	/* Use a single-CPU clear, as we cannot handle SMP here. */
- __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
+ local_ctl_clear_bit(14, 24); /* Disable WARNING MCH */
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
kill_task = 1;
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
- __ctl_load(cr0.val, 0, 0);
+ __local_ctl_load(cr0.val, 0, 0);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
:
: "Q" (*(struct vx_array *)mcesa->vector_save_area)
: "1");
- __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ __local_ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
/* Validate access registers */
asm volatile(
static int __init machine_check_init(void)
{
- ctl_set_bit(14, 25); /* enable external damage MCH */
- ctl_set_bit(14, 27); /* enable system recovery MCH */
- ctl_set_bit(14, 24); /* enable warning MCH */
+ system_ctl_set_bit(14, 25); /* enable external damage MCH */
+ system_ctl_set_bit(14, 27); /* enable system recovery MCH */
+ system_ctl_set_bit(14, 24); /* enable warning MCH */
return 0;
}
early_initcall(machine_check_init);
	 * Clear bit 15 of cr0 so that problem state is not authorized
	 * to extract measurement counters
*/
- ctl_clear_bit(0, 48);
+ system_ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
if (++cpump->active_events == 1) {
ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
WRITE_ONCE(S390_lowcore.ccd, ccd);
- __ctl_set_bit(0, 50);
+ local_ctl_set_bit(0, 50);
}
cpump->event = event;
if (flags & PERF_EF_START && !event->attr.sample_period) {
/* Only counting needs to read counter */
paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
- __ctl_clear_bit(0, 50);
+ local_ctl_clear_bit(0, 50);
WRITE_ONCE(S390_lowcore.ccd, 0);
}
}
S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
pcb->acc = virt_to_phys(cpump->area) | 0x1;
/* Enable CPU instruction lookup for PAIE1 control block */
- __ctl_set_bit(0, 49);
+ local_ctl_set_bit(0, 49);
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
}
if (--cpump->active_events == 0) {
/* Disable CPU instruction lookup for PAIE1 control block */
- __ctl_clear_bit(0, 49);
+ local_ctl_clear_bit(0, 49);
pcb->acc = 0;
S390_lowcore.aicd = 0;
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
- __ctl_store(cr0_old.val, 0, 0);
- __ctl_store(cr2_old.val, 2, 2);
+ __local_ctl_store(cr0_old.val, 0, 0);
+ __local_ctl_store(cr2_old.val, 2, 2);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __ctl_load(cr0_new.val, 0, 0);
+ __local_ctl_load(cr0_new.val, 0, 0);
if (cr2_changed)
- __ctl_load(cr2_new.val, 2, 2);
+ __local_ctl_load(cr2_new.val, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
return;
}
regs->psw.mask |= PSW_MASK_PER;
- __ctl_store(old, 9, 11);
+ __local_ctl_store(old, 9, 11);
if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
- __ctl_load(new, 9, 11);
+ __local_ctl_load(new, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
target->thread.gs_cb = data;
*target->thread.gs_cb = gs_cb;
if (target == current) {
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
restore_gs_cb(target->thread.gs_cb);
}
preempt_enable();
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1U;
- __ctl_store(lc->cregs_save_area, 0, 15);
+ __local_ctl_store(lc->cregs_save_area, 0, 15);
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
- __ctl_store(cr2.val, 2, 2);
- __ctl_store(cr5.val, 5, 5);
- __ctl_store(cr15.val, 15, 15);
+ __local_ctl_store(cr2.val, 2, 2);
+ __local_ctl_store(cr5.val, 5, 5);
+ __local_ctl_store(cr15.val, 15, 15);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
- __ctl_load(cr2.val, 2, 2);
- __ctl_load(cr5.val, 5, 5);
- __ctl_load(cr15.val, 15, 15);
+ __local_ctl_load(cr2.val, 2, 2);
+ __local_ctl_load(cr5.val, 5, 5);
+ __local_ctl_load(cr15.val, 15, 15);
}
/*
* Make sure global control register contents do not change
* until new CPU has initialized control registers.
*/
- ctlreg_lock();
+ system_ctlreg_lock();
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
- ctlreg_unlock();
+ system_ctlreg_unlock();
return 0;
}
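
Holding system_ctlreg_lock() here closes a race with system_ctl_set_clear_bit(): somewhere in pcpu_prepare_secondary() the new CPU's lowcore inherits the current system-wide register contents, presumably along these lines (names as used elsewhere in this patch; the exact code is an assumption):

	abs_lc = get_abs_lowcore();
	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area,
	       sizeof(lc->cregs_save_area));
	put_abs_lowcore(abs_lc);
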
/* Disable pseudo page faults on this cpu. */
pfault_fini();
/* Disable interrupt sources via control register. */
- __ctl_store(cregs, 0, 15);
+ __local_ctl_store(cregs, 0, 15);
cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
- __ctl_load(cregs, 0, 15);
+ __local_ctl_load(cregs, 0, 15);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
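
The hex masks clear whole groups of subclass bits in a single store/load cycle; going through the helpers bit by bit would be equivalent but slower. For instance, for just the two IPI subclasses enabled in smp_init() below:

	local_ctl_clear_bit(0, 14);	/* emergency-signal subclass (0x1201) */
	local_ctl_clear_bit(0, 13);	/* external-call subclass (0x1202) */
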
{
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
- ctl_set_bit(0, 14);
+ system_ctl_set_bit(0, 14);
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- ctl_set_bit(0, 13);
+ system_ctl_set_bit(0, 13);
}
void __init smp_prepare_boot_cpu(void)
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
- __ctl_set_bit(0,11);
+ local_ctl_set_bit(0, 11);
/* Always allow the timing alert external interrupt. */
- __ctl_set_bit(0, 4);
+ local_ctl_set_bit(0, 4);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
}
if (MACHINE_HAS_GS) {
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
if (current->thread.gs_cb) {
vcpu->arch.host_gscb = current->thread.gs_cb;
save_gs_cb(vcpu->arch.host_gscb);
kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
if (MACHINE_HAS_GS) {
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
if (!vcpu->arch.host_gscb)
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
preempt_enable();
}
if (test_kvm_facility(vcpu->kvm, 133)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
restore_gs_cb(current->thread.gs_cb);
preempt_enable();
{
unsigned long cr1, cr7;
- __ctl_store(cr1, 1, 1);
- __ctl_store(cr7, 7, 7);
+ __local_ctl_store(cr1, 1, 1);
+ __local_ctl_store(cr7, 7, 7);
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
return;
panic("incorrect ASCE on kernel %s\n"
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
}
__tlb_flush_local();
}
__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
if (MACHINE_HAS_NX)
- ctl_set_bit(0, 20);
+ system_ctl_set_bit(0, 20);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
if (MACHINE_HAS_PCI_MIO) {
static_branch_enable(&have_mio);
- ctl_set_bit(2, 5);
+ system_ctl_set_bit(2, 5);
}
rc = zpci_debug_init();
/* Enable service-signal interruption, disable timer interrupts */
old_tick = local_tick_disable();
trace_hardirqs_on();
- __ctl_store(cr0, 0, 0);
+ __local_ctl_store(cr0, 0, 0);
cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
cr0_sync |= 1UL << (63 - 54);
- __ctl_load(cr0_sync, 0, 0);
+ __local_ctl_load(cr0_sync, 0, 0);
__arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
cpu_relax();
}
local_irq_disable();
- __ctl_load(cr0, 0, 0);
+ __local_ctl_load(cr0, 0, 0);
if (!irq_context)
_local_bh_enable();
local_tick_enable(old_tick);
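
The 1UL << (63 - 54) idiom is worth spelling out: the Principles of Operation numbers control register bits from the left (bit 0 is the MSB), while the *_ctl_*_bit() helpers shift from the right, so architectural bit 54, the service-signal subclass, becomes LSB-based bit 9. The same line could equivalently be written as:

	cr0_sync |= 1UL << (63 - 54);	/* architectural (MSB-based) bit 54 */
	cr0_sync |= 1UL << 9;		/* LSB-based bit 9, as the helpers count */
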
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
- __ctl_store(cr0.val, 0, 0);
+ __local_ctl_store(cr0.val, 0, 0);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
- __ctl_load(cr0_new.val, 0, 0);
+ __local_ctl_load(cr0_new.val, 0, 0);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
- __ctl_load(cr0.val, 0, 0);
+ __local_ctl_load(cr0.val, 0, 0);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
task = kthread_run(crw_collect_info, NULL, "kmcheck");
if (IS_ERR(task))
return PTR_ERR(task);
- ctl_set_bit(14, 28); /* enable channel report MCH */
+ system_ctl_set_bit(14, 28); /* enable channel report MCH */
return 0;
}
device_initcall(crw_machine_check_init);
spin_lock(&isc_ref_lock);
if (isc_refs[isc] == 0)
- ctl_set_bit(6, 31 - isc);
+ system_ctl_set_bit(6, 31 - isc);
isc_refs[isc]++;
spin_unlock(&isc_ref_lock);
}
goto out_unlock;
}
if (isc_refs[isc] == 1)
- ctl_clear_bit(6, 31 - isc);
+ system_ctl_clear_bit(6, 31 - isc);
isc_refs[isc]--;
out_unlock:
spin_unlock(&isc_ref_lock);
rc = -EPROTONOSUPPORT;
goto out;
}
- ctl_set_bit(0, 1);
+ system_ctl_set_bit(0, 1);
rc = iucv_query_maxconn();
if (rc)
goto out_ctl;
out_int:
unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
- ctl_clear_bit(0, 1);
+ system_ctl_clear_bit(0, 1);
out:
return rc;
}