Force gcc to generate a stack frame for the call to dlsym(RTLD_NEXT).

Without this hack, at least amd64 with -O2 compiled the call down to a
plain jmp (a tail call) and The Wrong Thing happened: dlsym(3) then saw
a return address belonging to the calling object instead of the hijack
library, so RTLD_NEXT resolved against the wrong object.
pooka 2011-02-07 19:34:39 +00:00
parent 9db32a0d38
commit 6aea514dcb
1 changed file with 16 additions and 3 deletions

@@ -1,4 +1,4 @@
-/* $NetBSD: hijack.c,v 1.31 2011/02/07 12:23:05 pooka Exp $ */
+/* $NetBSD: hijack.c,v 1.32 2011/02/07 19:34:39 pooka Exp $ */
 
 /*-
  * Copyright (c) 2011 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: hijack.c,v 1.31 2011/02/07 12:23:05 pooka Exp $");
+__RCSID("$NetBSD: hijack.c,v 1.32 2011/02/07 19:34:39 pooka Exp $");
 
 #define __ssp_weak_name(fun) _hijack_ ## fun
@@ -174,12 +174,25 @@ type name args \
 
 /*
  * This is called from librumpclient in case of LD_PRELOAD.
  * It ensures correct RTLD_NEXT.
+ *
+ * ... except, it's apparently extremely difficult to force
+ * at least gcc to generate an actual stack frame here. So
+ * sprinkle some volatile foobar and baz to throw the optimizer
+ * off the scent and generate a variable assignment with the
+ * return value. The posterboy for this meltdown is amd64
+ * with -O2. At least with gcc 4.1.3 i386 works regardless of
+ * optimization.
  */
+volatile int rumphijack_unrope; /* there, unhang yourself */
 static void *
 hijackdlsym(void *handle, const char *symbol)
 {
-	return dlsym(handle, symbol);
+	void *rv;
+
+	rv = dlsym(handle, symbol);
+	rumphijack_unrope = *(volatile int *)rv;
+	return (void *)rv;
 }
 
 /* low calorie sockets? */
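
The trick generalizes beyond this tree: dlsym(RTLD_NEXT, ...) locates
the next definition of a symbol relative to the object containing the
call site, which it determines from the caller's return address. If the
compiler turns the wrapper's dlsym() call into a tail-call jmp, that
return address lies in whatever object called the wrapper, and the
lookup starts from the wrong place. Below is a minimal, self-contained
sketch of the same forced-stack-frame pattern; it is not part of the
commit, and the file name sketch.c, the sink sketch_unrope, the helper
sketch_dlsym, and the dup2 interposer are all hypothetical.

/*
 * sketch.c: an assumed example of the anti-tail-call pattern, built as
 * an LD_PRELOAD object that interposes dup2(2). Build roughly like:
 *
 *	cc -O2 -fPIC -shared -o libsketch.so sketch.c
 *
 * (add -ldl on older glibc; on NetBSD dlsym(3) lives in libc)
 */
#define _GNU_SOURCE	/* for RTLD_NEXT on glibc; visible by default on NetBSD */
#include <dlfcn.h>
#include <unistd.h>

volatile int sketch_unrope;	/* global sink, mirrors rumphijack_unrope */

static void *
sketch_dlsym(void *handle, const char *symbol)
{
	void *rv;

	rv = dlsym(handle, symbol);

	/*
	 * Consume the result through a volatile dereference. The store
	 * to a volatile global must happen after dlsym() returns, so
	 * the compiler cannot compile the call down to a bare jmp: a
	 * real call frame is generated and dlsym(3) sees a return
	 * address inside this object, which is what RTLD_NEXT
	 * resolution needs.
	 */
	sketch_unrope = *(volatile int *)rv;
	return rv;
}

/* hypothetical interposer: do nothing extra, just forward to the real dup2 */
int
dup2(int oldfd, int newfd)
{
	int (*real_dup2)(int, int);

	real_dup2 = (int (*)(int, int))sketch_dlsym(RTLD_NEXT, "dup2");
	return real_dup2(oldfd, newfd);
}

To check that the hack took, disassemble the object (for example with
objdump -d libsketch.so) and confirm that sketch_dlsym reaches dlsym
through a call instruction with a frame set up, rather than a bare jmp.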