extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
+unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
/*
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
+ OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
return false;
}
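+/*
+ * Check whether an instruction address is within the vdso text
+ * mapping of the current task.
+ */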
+static inline bool ip_within_vdso(unsigned long ip)
+{
+ return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
struct perf_callchain_entry_ctx *entry,
const struct pt_regs *regs, bool perf)
{
+ struct stack_frame_vdso_wrapper __user *sf_vdso;
struct stack_frame_user __user *sf;
unsigned long ip, sp;
bool first = true;
while (1) {
if (__get_user(sp, &sf->back_chain))
break;
+ /*
+ * VDSO entry code has a non-standard stack frame layout: the user
+ * wrapper saves the return address within its own frame and zeroes
+ * the back chain, so a zero back chain combined with an instruction
+ * pointer inside the vdso identifies such a frame.
+ * See VDSO user wrapper code for details.
+ */
+ if (!sp && ip_within_vdso(ip)) {
+ sf_vdso = (void __user *)sf;
+ if (__get_user(ip, &sf_vdso->return_address))
+ break;
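+ /* Skip over the vdso wrapper frame to get the caller's stack pointer. */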
+ sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+ sf = (void __user *)sp;
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ } else {
+ sf = (void __user *)sp;
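+ /* gprs[8] holds the saved r14, i.e. the return address. */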
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ }
/* Sanity check: ABI requires SP to be 8 byte aligned. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
- if (__get_user(ip, &sf->gprs[8]))
+ if (sp & 0x7)
break;
if (ip_invalid(ip)) {
/*
return addr;
}
-unsigned long vdso_size(void)
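+/*
+ * Page aligned size of the vdso text (the 32 bit variant for compat
+ * tasks, the 64 bit variant otherwise), without the vvar pages.
+ */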
+unsigned long vdso_text_size(void)
{
- unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long size;
if (is_compat_task())
- size += vdso32_end - vdso32_start;
+ size = vdso32_end - vdso32_start;
else
- size += vdso64_end - vdso64_start;
+ size = vdso64_end - vdso64_start;
return PAGE_ALIGN(size);
}
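+
+/* Size of the whole vdso mapping: vdso text plus vvar pages. */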
+unsigned long vdso_size(void)
+{
+ return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long addr = VDSO_BASE;
CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
stg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
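+ /* Zero the back chain to mark this as a vdso wrapper stack frame. */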
+ xc __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
brasl %r14,__s390_vdso_\func
lg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
CFI_RESTORE 14