path: root/libffi/src/x86
author     Matthias Benkard <code@mail.matthias.benkard.de>   2008-01-26 12:06:34 +0100
committer  Matthias Benkard <code@mail.matthias.benkard.de>   2008-01-26 12:06:34 +0100
commit     181d8ded82d49d0133d9d6fd1631d9816c970bfa (patch)
tree       c2791efcee34197c93ed964873c1ff30bf398330 /libffi/src/x86
parent     28a686ef16077f75afbfa3d315cd268680e11b75 (diff)
Import libffi from PyObjC 1.3.7.
darcs-hash:129bccb59266f997deac9b0353aea2d2d4049f92
Diffstat (limited to 'libffi/src/x86')
-rw-r--r--  libffi/src/x86/darwin.S       195
-rw-r--r--  libffi/src/x86/ffi.c          557
-rw-r--r--  libffi/src/x86/ffi64.c        706
-rw-r--r--  libffi/src/x86/ffi_darwin.c   584
-rw-r--r--  libffi/src/x86/ffitarget.h     81
-rw-r--r--  libffi/src/x86/sysv.S         184
-rw-r--r--  libffi/src/x86/unix64.S       303
-rw-r--r--  libffi/src/x86/win32.S        259
8 files changed, 2869 insertions, 0 deletions
diff --git a/libffi/src/x86/darwin.S b/libffi/src/x86/darwin.S
new file mode 100644
index 0000000..c5e55b5
--- /dev/null
+++ b/libffi/src/x86/darwin.S
@@ -0,0 +1,195 @@
+#ifdef __i386__
+/* -----------------------------------------------------------------------
+ darwin.S - Copyright (c) 1996, 1998, 2001, 2002, 2003 Red Hat, Inc.
+
+ X86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+/*
+ * This file is based on sysv.S and then hacked up by Ronald who hasn't done
+ * assembly programming in 8 years.
+ */
+
+#ifndef __x86_64__
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+.text
+
+.globl _ffi_prep_args
+
+.align 4
+.globl _ffi_call_SYSV
+
+_ffi_call_SYSV:
+.LFB1:
+ pushl %ebp
+.LCFI0:
+ movl %esp,%ebp
+.LCFI1:
+ /* Make room for all of the new args. */
+ movl 16(%ebp),%ecx
+ subl %ecx,%esp
+
+ movl %esp,%eax
+
+ /* Set up the arguments to ffi_prep_args and call it */
+ pushl 12(%ebp)
+ pushl %eax
+ call *8(%ebp)
+
+ /* Return stack to previous state and call the function */
+ addl $8,%esp
+
+ call *28(%ebp)
+
+ /* Remove the space we pushed for the args */
+ movl 16(%ebp),%ecx
+ addl %ecx,%esp
+
+ /* Load %ecx with the return type code */
+ movl 20(%ebp),%ecx
+
+ /* If the return value pointer is NULL, assume no return value. */
+ cmpl $0,24(%ebp)
+ jne retint
+
+ /* Even if there is no space for the return value, we are
+ obliged to handle floating-point values. */
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne noretval
+ fstp %st(0)
+
+ jmp epilogue
+
+retint:
+ cmpl $FFI_TYPE_INT,%ecx
+ jne retfloat
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ jmp epilogue
+
+retfloat:
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne retdouble
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ fstps (%ecx)
+ jmp epilogue
+
+retdouble:
+ cmpl $FFI_TYPE_DOUBLE,%ecx
+ jne retlongdouble
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ fstpl (%ecx)
+ jmp epilogue
+
+retlongdouble:
+ cmpl $FFI_TYPE_LONGDOUBLE,%ecx
+ jne retint64
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ fstpt (%ecx)
+ jmp epilogue
+
+retint64:
+ cmpl $FFI_TYPE_SINT64,%ecx
+ jne retstruct
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ movl %edx,4(%ecx)
+
+retstruct:
+ /* Nothing to do! */
+
+noretval:
+epilogue:
+ movl %ebp,%esp
+ popl %ebp
+ ret
+.LFE1:
+.ffi_call_SYSV_end:
+#if 0
+ .size ffi_call_SYSV,.ffi_call_SYSV_end-ffi_call_SYSV
+#endif
+
+#if 0
+ .section .eh_frame,EH_FRAME_FLAGS,@progbits
+.Lframe1:
+ .long .LECIE1-.LSCIE1 /* Length of Common Information Entry */
+.LSCIE1:
+ .long 0x0 /* CIE Identifier Tag */
+ .byte 0x1 /* CIE Version */
+#ifdef __PIC__
+ .ascii "zR\0" /* CIE Augmentation */
+#else
+ .ascii "\0" /* CIE Augmentation */
+#endif
+ .byte 0x1 /* .uleb128 0x1; CIE Code Alignment Factor */
+ .byte 0x7c /* .sleb128 -4; CIE Data Alignment Factor */
+ .byte 0x8 /* CIE RA Column */
+#ifdef __PIC__
+ .byte 0x1 /* .uleb128 0x1; Augmentation size */
+ .byte 0x1b /* FDE Encoding (pcrel sdata4) */
+#endif
+ .byte 0xc /* DW_CFA_def_cfa */
+ .byte 0x4 /* .uleb128 0x4 */
+ .byte 0x4 /* .uleb128 0x4 */
+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
+ .byte 0x1 /* .uleb128 0x1 */
+ .align 4
+.LECIE1:
+.LSFDE1:
+ .long .LEFDE1-.LASFDE1 /* FDE Length */
+.LASFDE1:
+ .long .LASFDE1-.Lframe1 /* FDE CIE offset */
+#ifdef __PIC__
+ .long .LFB1-. /* FDE initial location */
+#else
+ .long .LFB1 /* FDE initial location */
+#endif
+ .long .LFE1-.LFB1 /* FDE address range */
+#ifdef __PIC__
+ .byte 0x0 /* .uleb128 0x0; Augmentation size */
+#endif
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .long .LCFI0-.LFB1
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .byte 0x8 /* .uleb128 0x8 */
+ .byte 0x85 /* DW_CFA_offset, column 0x5 */
+ .byte 0x2 /* .uleb128 0x2 */
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .long .LCFI1-.LCFI0
+ .byte 0xd /* DW_CFA_def_cfa_register */
+ .byte 0x5 /* .uleb128 0x5 */
+ .align 4
+.LEFDE1:
+#endif
+
+#endif /* ifndef __x86_64__ */
+
+#endif /* defined __i386__ */
diff --git a/libffi/src/x86/ffi.c b/libffi/src/x86/ffi.c
new file mode 100644
index 0000000..633e549
--- /dev/null
+++ b/libffi/src/x86/ffi.c
@@ -0,0 +1,557 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 1996, 1998, 1999, 2001 Red Hat, Inc.
+ Copyright (c) 2002 Ranjit Mathew
+ Copyright (c) 2002 Bo Thorsen
+ Copyright (c) 2002 Roger Sayle
+
+ x86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef __x86_64__
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+
+/* ffi_prep_args is called by the assembly routine once stack space
+ has been allocated for the function's arguments */
+
+/*@-exportheader@*/
+void ffi_prep_args(char *stack, extended_cif *ecif)
+/*@=exportheader@*/
+{
+ register unsigned int i;
+ register void **p_argv;
+ register char *argp;
+ register ffi_type **p_arg;
+
+ argp = stack;
+
+ if (ecif->cif->flags == FFI_TYPE_STRUCT)
+ {
+ *(void **) argp = ecif->rvalue;
+ argp += 4;
+ }
+
+ p_argv = ecif->avalue;
+
+ for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types;
+ i != 0;
+ i--, p_arg++)
+ {
+ size_t z;
+
+ /* Align if necessary */
+ if ((sizeof(int) - 1) & (unsigned) argp)
+ argp = (char *) ALIGN(argp, sizeof(int));
+
+ z = (*p_arg)->size;
+ if (z < sizeof(int))
+ {
+ z = sizeof(int);
+ switch ((*p_arg)->type)
+ {
+ case FFI_TYPE_SINT8:
+ *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_UINT8:
+ *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_SINT16:
+ *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_UINT16:
+ *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_SINT32:
+ *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_UINT32:
+ *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ break;
+
+ default:
+ FFI_ASSERT(0);
+ }
+ }
+ else
+ {
+ memcpy(argp, *p_argv, z);
+ }
+ p_argv++;
+ argp += z;
+ }
+
+ return;
+}
+
+/* Perform machine dependent cif processing */
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ /* Set the return type flag */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+#ifndef X86_WIN32
+ case FFI_TYPE_STRUCT:
+#endif
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ cif->flags = (unsigned) cif->rtype->type;
+ break;
+
+ case FFI_TYPE_UINT64:
+ cif->flags = FFI_TYPE_SINT64;
+ break;
+
+#ifdef X86_WIN32
+ case FFI_TYPE_STRUCT:
+ if (cif->rtype->size == 1)
+ {
+ cif->flags = FFI_TYPE_SINT8; /* same as char size */
+ }
+ else if (cif->rtype->size == 2)
+ {
+ cif->flags = FFI_TYPE_SINT16; /* same as short size */
+ }
+ else if (cif->rtype->size == 4)
+ {
+ cif->flags = FFI_TYPE_INT; /* same as int type */
+ }
+ else if (cif->rtype->size == 8)
+ {
+ cif->flags = FFI_TYPE_SINT64; /* same as int64 type */
+ }
+ else
+ {
+ cif->flags = FFI_TYPE_STRUCT;
+ }
+ break;
+#endif
+
+ default:
+ cif->flags = FFI_TYPE_INT;
+ break;
+ }
+
+ return FFI_OK;
+}
+
+/*@-declundef@*/
+/*@-exportheader@*/
+extern void ffi_call_SYSV(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+/*@=declundef@*/
+/*@=exportheader@*/
+
+#ifdef X86_WIN32
+/*@-declundef@*/
+/*@-exportheader@*/
+extern void ffi_call_STDCALL(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+/*@=declundef@*/
+/*@=exportheader@*/
+#endif /* X86_WIN32 */
+
+void ffi_call(/*@dependent@*/ ffi_cif *cif,
+ void (*fn)(),
+ /*@out@*/ void *rvalue,
+ /*@dependent@*/ void **avalue)
+{
+ extended_cif ecif;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* If the return value is a struct and we don't have a return */
+ /* value address then we need to make one */
+
+ if ((rvalue == NULL) &&
+ (cif->flags == FFI_TYPE_STRUCT))
+ {
+ /*@-sysunrecog@*/
+ ecif.rvalue = alloca(cif->rtype->size);
+ /*@=sysunrecog@*/
+ }
+ else
+ ecif.rvalue = rvalue;
+
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ /*@-usedef@*/
+ ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#ifdef X86_WIN32
+ case FFI_STDCALL:
+ /*@-usedef@*/
+ ffi_call_STDCALL(ffi_prep_args, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#endif /* X86_WIN32 */
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
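
For orientation, a minimal usage sketch of the public entry point defined above. It assumes the generic libffi API from ffi.h (ffi_prep_cif, ffi_type_sint, ffi_type_pointer and the FFI_FN cast macro); if this libffi version lacks FFI_FN, a plain cast to void (*)() serves the same purpose.

/* Hypothetical example, not part of this import: call puts("hello")
   through ffi_call. */
#include <stdio.h>
#include <ffi.h>

int main(void)
{
  ffi_cif cif;
  ffi_type *arg_types[1] = { &ffi_type_pointer };
  void *arg_values[1];
  char *s = "hello";
  ffi_arg rc = 0;

  arg_values[0] = &s;
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK)
    ffi_call(&cif, FFI_FN(puts), &rc, arg_values);  /* rc receives puts's result */
  return (int) rc;
}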
+
+
+/** private members **/
+
+static void ffi_prep_incoming_args_SYSV (char *stack, void **ret,
+ void** args, ffi_cif* cif);
+static void ffi_closure_SYSV (ffi_closure *)
+ __attribute__ ((regparm(1)));
+static void ffi_closure_raw_SYSV (ffi_raw_closure *)
+ __attribute__ ((regparm(1)));
+
+/* This function is jumped to by the trampoline */
+
+static void
+ffi_closure_SYSV (closure)
+ ffi_closure *closure;
+{
+ // this is our return value storage
+ long double res;
+
+ // our various things...
+ ffi_cif *cif;
+ void **arg_area;
+ unsigned short rtype;
+ void *resp = (void*)&res;
+ void *args = __builtin_dwarf_cfa ();
+
+ cif = closure->cif;
+ arg_area = (void**) alloca (cif->nargs * sizeof (void*));
+
+ /* this call will initialize ARG_AREA, such that each
+ * element in that array points to the corresponding
+ * value on the stack; and if the function returns
+ * a structure, it will re-set RESP to point to the
+ * structure return address. */
+
+ ffi_prep_incoming_args_SYSV(args, (void**)&resp, arg_area, cif);
+
+ (closure->fun) (cif, resp, arg_area, closure->user_data);
+
+ rtype = cif->flags;
+
+ /* now, do a generic return based on the value of rtype */
+ if (rtype == FFI_TYPE_INT)
+ {
+ asm ("movl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+ else if (rtype == FFI_TYPE_FLOAT)
+ {
+ asm ("flds (%0)" : : "r" (resp) : "st" );
+ }
+ else if (rtype == FFI_TYPE_DOUBLE)
+ {
+ asm ("fldl (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_LONGDOUBLE)
+ {
+ asm ("fldt (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_SINT64)
+ {
+ asm ("movl 0(%0),%%eax;"
+ "movl 4(%0),%%edx"
+ : : "r"(resp)
+ : "eax", "edx");
+ }
+#ifdef X86_WIN32
+ else if (rtype == FFI_TYPE_SINT8) /* 1-byte struct */
+ {
+ asm ("movsbl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+ else if (rtype == FFI_TYPE_SINT16) /* 2-byte struct */
+ {
+ asm ("movswl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+#endif
+}
+
+/*@-exportheader@*/
+static void
+ffi_prep_incoming_args_SYSV(char *stack, void **rvalue,
+ void **avalue, ffi_cif *cif)
+/*@=exportheader@*/
+{
+ register unsigned int i;
+ register void **p_argv;
+ register char *argp;
+ register ffi_type **p_arg;
+
+ argp = stack;
+
+ if ( cif->flags == FFI_TYPE_STRUCT ) {
+ *rvalue = *(void **) argp;
+ argp += 4;
+ }
+
+ p_argv = avalue;
+
+ for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++)
+ {
+ size_t z;
+
+ /* Align if necessary */
+ if ((sizeof(int) - 1) & (unsigned) argp) {
+ argp = (char *) ALIGN(argp, sizeof(int));
+ }
+
+ z = (*p_arg)->size;
+
+ /* because we're little endian, this is what it turns into. */
+
+ *p_argv = (void*) argp;
+
+ p_argv++;
+ argp += z;
+ }
+
+ return;
+}
+
+/* How to make a trampoline. Derived from gcc/config/i386/i386.c. */
+
+#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX) \
+({ unsigned char *__tramp = (unsigned char*)(TRAMP); \
+ unsigned int __fun = (unsigned int)(FUN); \
+ unsigned int __ctx = (unsigned int)(CTX); \
+ unsigned int __dis = __fun - ((unsigned int) __tramp + FFI_TRAMPOLINE_SIZE); \
+ *(unsigned char*) &__tramp[0] = 0xb8; \
+ *(unsigned int*) &__tramp[1] = __ctx; /* movl __ctx, %eax */ \
+ *(unsigned char *) &__tramp[5] = 0xe9; \
+ *(unsigned int*) &__tramp[6] = __dis; /* jmp __fun */ \
+ })
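
Decoded, the ten bytes the macro writes are 0xb8 <ctx:4> 0xe9 <disp:4>, i.e. movl $closure,%eax followed by a relative jmp whose displacement is measured from the end of the trampoline. A small hypothetical helper (not part of libffi) that reads those fields back, useful when inspecting a trampoline in a debugger:

#include <stdio.h>
#include <string.h>

/* Recover the context pointer and jump target from a 10-byte trampoline
   laid out as written by FFI_INIT_TRAMPOLINE above (32-bit x86 only). */
static void decode_trampoline(const unsigned char *tramp)
{
  unsigned int ctx, dis;
  memcpy(&ctx, &tramp[1], 4);              /* immediate of movl $ctx,%eax */
  memcpy(&dis, &tramp[6], 4);              /* rel32 immediate of the jmp  */
  printf("ctx = %#x, target = %#x\n",
         ctx, (unsigned int)(tramp + 10) + dis);  /* target = end of tramp + disp */
}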
+
+
+/* the cif must already be prep'ed */
+
+ffi_status
+ffi_prep_closure (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data)
+{
+ FFI_ASSERT (cif->abi == FFI_SYSV);
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], \
+ &ffi_closure_SYSV, \
+ (void*)closure);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
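
A hedged sketch of how the closure machinery above is typically used (assumes the ffi_closure definition from this version's ffi.h and that the closure object lives in executable memory; error handling reduced to early returns):

#include <ffi.h>

/* Hypothetical handler: add a user-supplied bias to an int argument. */
static void add_bias(ffi_cif *cif, void *ret, void **args, void *user_data)
{
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
}

static int call_through_closure(void)
{
  static ffi_closure cl;                  /* assumed executable on this platform */
  static int bias = 42;
  static ffi_type *atypes[1] = { &ffi_type_sint };
  ffi_cif cif;
  int (*fn)(int);

  if (ffi_prep_cif(&cif, FFI_SYSV, 1, &ffi_type_sint, atypes) != FFI_OK
      || ffi_prep_closure(&cl, &cif, add_bias, &bias) != FFI_OK)
    return -1;

  fn = (int (*)(int)) &cl;                /* the trampoline makes &cl callable */
  return fn(1);                           /* returns 43 */
}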
+
+/* ------- Native raw API support -------------------------------- */
+
+#if !FFI_NO_RAW_API
+
+static void
+ffi_closure_raw_SYSV (closure)
+ ffi_raw_closure *closure;
+{
+ // this is our return value storage
+ long double res;
+
+ // our various things...
+ ffi_raw *raw_args;
+ ffi_cif *cif;
+ unsigned short rtype;
+ void *resp = (void*)&res;
+
+ /* get the cif */
+ cif = closure->cif;
+
+ /* the SYSV/X86 abi matches the RAW API exactly, well.. almost */
+ raw_args = (ffi_raw*) __builtin_dwarf_cfa ();
+
+ (closure->fun) (cif, resp, raw_args, closure->user_data);
+
+ rtype = cif->flags;
+
+ /* now, do a generic return based on the value of rtype */
+ if (rtype == FFI_TYPE_INT)
+ {
+ asm ("movl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+ else if (rtype == FFI_TYPE_FLOAT)
+ {
+ asm ("flds (%0)" : : "r" (resp) : "st" );
+ }
+ else if (rtype == FFI_TYPE_DOUBLE)
+ {
+ asm ("fldl (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_LONGDOUBLE)
+ {
+ asm ("fldt (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_SINT64)
+ {
+ asm ("movl 0(%0),%%eax; movl 4(%0),%%edx"
+ : : "r"(resp)
+ : "eax", "edx");
+ }
+}
+
+
+
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data)
+{
+ int i;
+
+ FFI_ASSERT (cif->abi == FFI_SYSV);
+
+ // we currently don't support certain kinds of arguments for raw
+ // closures. This should be implemented by a separate assembly language
+ // routine, since it would require argument processing, something we
+ // don't do now for performance.
+
+ for (i = cif->nargs-1; i >= 0; i--)
+ {
+ FFI_ASSERT (cif->arg_types[i]->type != FFI_TYPE_STRUCT);
+ FFI_ASSERT (cif->arg_types[i]->type != FFI_TYPE_LONGDOUBLE);
+ }
+
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_raw_SYSV,
+ (void*)closure);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+static void
+ffi_prep_args_raw(char *stack, extended_cif *ecif)
+{
+ memcpy (stack, ecif->avalue, ecif->cif->bytes);
+}
+
+/* we borrow this routine from libffi (it must be changed, though, to
+ * actually call the function passed in the first argument. as of
+ * libffi-1.20, this is not the case.)
+ */
+
+extern void
+ffi_call_SYSV(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+
+#ifdef X86_WIN32
+extern void
+ffi_call_STDCALL(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+#endif /* X86_WIN32 */
+
+void
+ffi_raw_call(/*@dependent@*/ ffi_cif *cif,
+ void (*fn)(),
+ /*@out@*/ void *rvalue,
+ /*@dependent@*/ ffi_raw *fake_avalue)
+{
+ extended_cif ecif;
+ void **avalue = (void **)fake_avalue;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* If the return value is a struct and we don't have a return */
+ /* value address then we need to make one */
+
+ if ((rvalue == NULL) &&
+ (cif->rtype->type == FFI_TYPE_STRUCT))
+ {
+ /*@-sysunrecog@*/
+ ecif.rvalue = alloca(cif->rtype->size);
+ /*@=sysunrecog@*/
+ }
+ else
+ ecif.rvalue = rvalue;
+
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ /*@-usedef@*/
+ ffi_call_SYSV(ffi_prep_args_raw, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#ifdef X86_WIN32
+ case FFI_STDCALL:
+ /*@-usedef@*/
+ ffi_call_STDCALL(ffi_prep_args_raw, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#endif /* X86_WIN32 */
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
+
+#endif /* !FFI_NO_RAW_API */
+
+#endif /* ifndef __x86_64__ */
diff --git a/libffi/src/x86/ffi64.c b/libffi/src/x86/ffi64.c
new file mode 100644
index 0000000..343ee92
--- /dev/null
+++ b/libffi/src/x86/ffi64.c
@@ -0,0 +1,706 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2002 Bo Thorsen <bo@suse.de>
+
+ x86-64 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+#include <stdarg.h>
+
+/* ffi_prep_args is called by the assembly routine once stack space
+ has been allocated for the function's arguments */
+
+#ifdef __x86_64__
+
+#define MAX_GPR_REGS 6
+#define MAX_SSE_REGS 8
+typedef struct
+{
+ /* Registers for argument passing. */
+ long gpr[MAX_GPR_REGS];
+ __int128_t sse[MAX_SSE_REGS];
+
+ /* Stack space for arguments. */
+ char argspace[0];
+} stackLayout;
+
+/* All references to register classes here are identical to the code in
+   gcc/config/i386/i386.c. Do *not* change one without the other. */
+
+/* Register class used for passing a given 64-bit part of the argument.
+   These represent classes as documented by the PS ABI, with the exception
+   of the SSESF and SSEDF classes, which are basically the SSE class; gcc
+   just uses SFmode or DFmode moves instead of DImode to avoid reformatting
+   penalties.
+
+   Similarly, we play games with INTEGERSI_CLASS to use cheaper SImode moves
+   whenever possible (the upper half does contain padding).
+ */
+enum x86_64_reg_class
+ {
+ X86_64_NO_CLASS,
+ X86_64_INTEGER_CLASS,
+ X86_64_INTEGERSI_CLASS,
+ X86_64_SSE_CLASS,
+ X86_64_SSESF_CLASS,
+ X86_64_SSEDF_CLASS,
+ X86_64_SSEUP_CLASS,
+ X86_64_X87_CLASS,
+ X86_64_X87UP_CLASS,
+ X86_64_MEMORY_CLASS
+ };
+
+#define MAX_CLASSES 4
+
+/* x86-64 register passing implementation. See the x86-64 ABI for details.
+   The goal of this code is to classify each eightbyte of an incoming argument
+   by register class and assign registers accordingly. */
+
+/* Return the union class of CLASS1 and CLASS2.
+ See the x86-64 PS ABI for details. */
+
+static enum x86_64_reg_class
+merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
+{
+ /* Rule #1: If both classes are equal, this is the resulting class. */
+ if (class1 == class2)
+ return class1;
+
+ /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
+ the other class. */
+ if (class1 == X86_64_NO_CLASS)
+ return class2;
+ if (class2 == X86_64_NO_CLASS)
+ return class1;
+
+ /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
+ if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
+ return X86_64_MEMORY_CLASS;
+
+ /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
+ if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
+ || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
+ return X86_64_INTEGERSI_CLASS;
+ if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
+ || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
+ return X86_64_INTEGER_CLASS;
+
+ /* Rule #5: If one of the classes is X87 or X87UP class, MEMORY is used. */
+ if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS
+ || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS)
+ return X86_64_MEMORY_CLASS;
+
+ /* Rule #6: Otherwise class SSE is used. */
+ return X86_64_SSE_CLASS;
+}
+
+/* Classify the argument of type TYPE and mode MODE.
+ CLASSES will be filled by the register class used to pass each word
+ of the operand. The number of words is returned. In case the parameter
+ should be passed in memory, 0 is returned. As a special case for zero
+ sized containers, classes[0] will be NO_CLASS and 1 is returned.
+
+ See the x86-64 PS ABI for details.
+*/
+static int
+classify_argument (ffi_type *type, enum x86_64_reg_class classes[],
+ int *byte_offset)
+{
+ /* First, align to the right place. */
+ *byte_offset = ALIGN(*byte_offset, type->alignment);
+
+ switch (type->type)
+ {
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_POINTER:
+ if (((*byte_offset) % 8 + type->size) <= 4)
+ classes[0] = X86_64_INTEGERSI_CLASS;
+ else
+ classes[0] = X86_64_INTEGER_CLASS;
+ return 1;
+ case FFI_TYPE_FLOAT:
+ if (((*byte_offset) % 8) == 0)
+ classes[0] = X86_64_SSESF_CLASS;
+ else
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case FFI_TYPE_DOUBLE:
+ classes[0] = X86_64_SSEDF_CLASS;
+ return 1;
+ case FFI_TYPE_LONGDOUBLE:
+ classes[0] = X86_64_X87_CLASS;
+ classes[1] = X86_64_X87UP_CLASS;
+ return 2;
+ case FFI_TYPE_STRUCT:
+ {
+ const int UNITS_PER_WORD = 8;
+ int words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ ffi_type **ptr;
+ int i;
+ enum x86_64_reg_class subclasses[MAX_CLASSES];
+
+ /* If the struct is larger than 16 bytes, pass it on the stack. */
+ if (type->size > 16)
+ return 0;
+
+ for (i = 0; i < words; i++)
+ classes[i] = X86_64_NO_CLASS;
+
+ /* Merge the fields of structure. */
+ for (ptr=type->elements; (*ptr)!=NULL; ptr++)
+ {
+ int num;
+
+ num = classify_argument (*ptr, subclasses, byte_offset);
+ if (num == 0)
+ return 0;
+ for (i = 0; i < num; i++)
+ {
+ int pos = *byte_offset / 8;
+ classes[i + pos] =
+ merge_classes (subclasses[i], classes[i + pos]);
+ }
+
+ if ((*ptr)->type != FFI_TYPE_STRUCT)
+ *byte_offset += (*ptr)->size;
+ }
+
+ /* Final merger cleanup. */
+ for (i = 0; i < words; i++)
+ {
+ /* If one class is MEMORY, everything should be passed in
+ memory. */
+ if (classes[i] == X86_64_MEMORY_CLASS)
+ return 0;
+
+ /* The X86_64_SSEUP_CLASS should always be preceded by
+ X86_64_SSE_CLASS. */
+ if (classes[i] == X86_64_SSEUP_CLASS
+ && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
+ classes[i] = X86_64_SSE_CLASS;
+
+ /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
+ if (classes[i] == X86_64_X87UP_CLASS
+ && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
+ classes[i] = X86_64_SSE_CLASS;
+ }
+ return words;
+ }
+
+ default:
+ FFI_ASSERT(0);
+ }
+ return 0; /* Never reached. */
+}
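
As a worked illustration of the classifier (a sketch only; the ffi_type initializer assumes the field order size, alignment, type, elements from ffi.h), a 16-byte struct { double; long } spans two eightbytes, the first classifying as SSEDF and the second as INTEGER, so the whole argument travels in one SSE register and one GPR:

/* Hypothetical example, not part of this import. */
static void classify_example (void)
{
  static ffi_type *elements[] = { &ffi_type_double, &ffi_type_sint64, NULL };
  static ffi_type s = { 16, 8, FFI_TYPE_STRUCT, elements };
  enum x86_64_reg_class cls[MAX_CLASSES];
  int off = 0;
  int n = classify_argument (&s, cls, &off);

  /* n == 2; cls[0] == X86_64_SSEDF_CLASS, cls[1] == X86_64_INTEGER_CLASS. */
  (void) n;
}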
+
+/* Examine the argument and set the number of registers required in each
+   class. Return 0 iff the parameter should be passed in memory. */
+static int
+examine_argument (ffi_type *type, int in_return, int *int_nregs,int *sse_nregs)
+{
+ enum x86_64_reg_class class[MAX_CLASSES];
+ int offset = 0;
+ int n;
+
+ n = classify_argument (type, class, &offset);
+
+ if (n == 0)
+ return 0;
+
+ *int_nregs = 0;
+ *sse_nregs = 0;
+ for (n--; n>=0; n--)
+ switch (class[n])
+ {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ (*int_nregs)++;
+ break;
+ case X86_64_SSE_CLASS:
+ case X86_64_SSESF_CLASS:
+ case X86_64_SSEDF_CLASS:
+ (*sse_nregs)++;
+ break;
+ case X86_64_NO_CLASS:
+ case X86_64_SSEUP_CLASS:
+ break;
+ case X86_64_X87_CLASS:
+ case X86_64_X87UP_CLASS:
+ if (!in_return)
+ return 0;
+ break;
+ default:
+ abort ();
+ }
+ return 1;
+}
+
+/* Functions to load floats and double to an SSE register placeholder. */
+extern void float2sse (float, __int128_t *);
+extern void double2sse (double, __int128_t *);
+extern void floatfloat2sse (void *, __int128_t *);
+
+/* Functions to put the floats and doubles back. */
+extern float sse2float (__int128_t *);
+extern double sse2double (__int128_t *);
+extern void sse2floatfloat(__int128_t *, void *);
+
+/*@-exportheader@*/
+void
+ffi_prep_args (stackLayout *stack, extended_cif *ecif)
+/*@=exportheader@*/
+{
+ int gprcount, ssecount, i, g, s;
+ void **p_argv;
+ void *argp = &stack->argspace;
+ ffi_type **p_arg;
+
+ /* First check if the return value should be passed in memory. If so,
+ pass the pointer as the first argument. */
+ gprcount = ssecount = 0;
+ if (ecif->cif->rtype->type != FFI_TYPE_VOID
+ && examine_argument (ecif->cif->rtype, 1, &g, &s) == 0)
+ stack->gpr[gprcount++] = (long) ecif->rvalue;
+
+ for (i=ecif->cif->nargs, p_arg=ecif->cif->arg_types, p_argv = ecif->avalue;
+ i!=0; i--, p_arg++, p_argv++)
+ {
+ int in_register = 0;
+
+ switch ((*p_arg)->type)
+ {
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_POINTER:
+ if (gprcount < MAX_GPR_REGS)
+ {
+ stack->gpr[gprcount] = 0;
+ stack->gpr[gprcount++] = *(long long *)(*p_argv);
+ in_register = 1;
+ }
+ break;
+
+ case FFI_TYPE_FLOAT:
+ if (ssecount < MAX_SSE_REGS)
+ {
+ float2sse (*(float *)(*p_argv), &stack->sse[ssecount++]);
+ in_register = 1;
+ }
+ break;
+
+ case FFI_TYPE_DOUBLE:
+ if (ssecount < MAX_SSE_REGS)
+ {
+ double2sse (*(double *)(*p_argv), &stack->sse[ssecount++]);
+ in_register = 1;
+ }
+ break;
+ }
+
+ if (in_register)
+ continue;
+
+ /* Either all places in registers were filled, or this is a
+ type that potentially goes into a memory slot. */
+ if (examine_argument (*p_arg, 0, &g, &s) == 0
+ || gprcount + g > MAX_GPR_REGS || ssecount + s > MAX_SSE_REGS)
+ {
+ /* Pass this argument in memory. */
+ argp = (void *)ALIGN(argp, (*p_arg)->alignment);
+ memcpy (argp, *p_argv, (*p_arg)->size);
+ argp += (*p_arg)->size;
+ }
+ else
+ {
+ /* All easy cases are eliminated. Now fire the big guns. */
+
+ enum x86_64_reg_class classes[MAX_CLASSES];
+ int offset = 0, j, num;
+ void *a;
+
+ num = classify_argument (*p_arg, classes, &offset);
+ for (j=0, a=*p_argv; j<num; j++, a+=8)
+ {
+ switch (classes[j])
+ {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ stack->gpr[gprcount++] = *(long long *)a;
+ break;
+ case X86_64_SSE_CLASS:
+ floatfloat2sse (a, &stack->sse[ssecount++]);
+ break;
+ case X86_64_SSESF_CLASS:
+ float2sse (*(float *)a, &stack->sse[ssecount++]);
+ break;
+ case X86_64_SSEDF_CLASS:
+ double2sse (*(double *)a, &stack->sse[ssecount++]);
+ break;
+ default:
+ abort();
+ }
+ }
+ }
+ }
+}
+
+/* Perform machine dependent cif processing. */
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ int gprcount, ssecount, i, g, s;
+
+ gprcount = ssecount = 0;
+
+ /* Reset the byte count. We handle this size estimation here. */
+ cif->bytes = 0;
+
+ /* If the return value should be passed in memory, pass the pointer
+ as the first argument. The actual memory isn't allocated here. */
+ if (cif->rtype->type != FFI_TYPE_VOID
+ && examine_argument (cif->rtype, 1, &g, &s) == 0)
+ gprcount = 1;
+
+ /* Go over all arguments and determine the way they should be passed.
+ If it fits in a register and there is space for it, let that be so. If
+ not, add its size to the stack byte count. */
+ for (i=0; i<cif->nargs; i++)
+ {
+ if (examine_argument (cif->arg_types[i], 0, &g, &s) == 0
+ || gprcount + g > MAX_GPR_REGS || ssecount + s > MAX_SSE_REGS)
+ {
+ /* This is passed in memory. First align to the basic type. */
+ cif->bytes = ALIGN(cif->bytes, cif->arg_types[i]->alignment);
+
+ /* Stack arguments are *always* at least 8 byte aligned. */
+ cif->bytes = ALIGN(cif->bytes, 8);
+
+ /* Now add the size of this argument. */
+ cif->bytes += cif->arg_types[i]->size;
+ }
+ else
+ {
+ gprcount += g;
+ ssecount += s;
+ }
+ }
+
+ /* Set the flag for the closures return. */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_STRUCT:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ cif->flags = (unsigned) cif->rtype->type;
+ break;
+
+ case FFI_TYPE_UINT64:
+ cif->flags = FFI_TYPE_SINT64;
+ break;
+
+ default:
+ cif->flags = FFI_TYPE_INT;
+ break;
+ }
+
+ return FFI_OK;
+}
+
+typedef struct
+{
+ long gpr[2];
+ __int128_t sse[2];
+ long double st0;
+} return_value;
+
+void
+ffi_fill_return_value (return_value *rv, extended_cif *ecif)
+{
+ enum x86_64_reg_class classes[MAX_CLASSES];
+ int i = 0, num;
+ long *gpr = rv->gpr;
+ __int128_t *sse = rv->sse;
+ signed char sc;
+ signed short ss;
+
+ /* This is needed because of the way x86-64 returns small signed
+ integers (signed char and short). */
+ switch (ecif->cif->rtype->type)
+ {
+ case FFI_TYPE_SINT8:
+ sc = *(signed char *)gpr;
+ *(long long *)ecif->rvalue = (long long)sc;
+ return;
+ case FFI_TYPE_SINT16:
+ ss = *(signed short *)gpr;
+ *(long long *)ecif->rvalue = (long long)ss;
+ return;
+ default:
+ /* Just continue. */
+ ;
+ }
+
+ num = classify_argument (ecif->cif->rtype, classes, &i);
+
+ if (num == 0)
+ /* Return in memory. */
+ ecif->rvalue = (void *) rv->gpr[0];
+ else if (num == 2 && classes[0] == X86_64_X87_CLASS &&
+ classes[1] == X86_64_X87UP_CLASS)
+ /* This is a long double (it is easiest to handle it this way instead
+ of an eightbyte at a time as in the loop below). */
+ *((long double *)ecif->rvalue) = rv->st0;
+ else
+ {
+ void *a;
+
+ for (i=0, a=ecif->rvalue; i<num; i++, a+=8)
+ {
+ switch (classes[i])
+ {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ *(long long *)a = *gpr;
+ gpr++;
+ break;
+ case X86_64_SSE_CLASS:
+ sse2floatfloat (sse++, a);
+ break;
+ case X86_64_SSESF_CLASS:
+ *(float *)a = sse2float (sse++);
+ break;
+ case X86_64_SSEDF_CLASS:
+ *(double *)a = sse2double (sse++);
+ break;
+ default:
+ abort();
+ }
+ }
+ }
+}
+
+/*@-declundef@*/
+/*@-exportheader@*/
+extern void ffi_call_UNIX64(void (*)(stackLayout *, extended_cif *),
+ void (*) (return_value *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, /*@out@*/ unsigned *, void (*fn)());
+/*@=declundef@*/
+/*@=exportheader@*/
+
+void ffi_call(/*@dependent@*/ ffi_cif *cif,
+ void (*fn)(),
+ /*@out@*/ void *rvalue,
+ /*@dependent@*/ void **avalue)
+{
+ extended_cif ecif;
+ int dummy;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* If the return value is a struct and we don't have a return */
+ /* value address then we need to make one */
+
+ if ((rvalue == NULL) &&
+ (examine_argument (cif->rtype, 1, &dummy, &dummy) == 0))
+ {
+ /*@-sysunrecog@*/
+ ecif.rvalue = alloca(cif->rtype->size);
+ /*@=sysunrecog@*/
+ }
+ else
+ ecif.rvalue = rvalue;
+
+ /* The stack must always be 16-byte aligned. Make it so. */
+ cif->bytes = ALIGN(cif->bytes, 16);
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ /* Calling 32bit code from 64bit is not possible */
+ FFI_ASSERT(0);
+ break;
+
+ case FFI_UNIX64:
+ /*@-usedef@*/
+ ffi_call_UNIX64 (ffi_prep_args, ffi_fill_return_value, &ecif,
+ cif->bytes, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
+
+extern void ffi_closure_UNIX64(void);
+
+ffi_status
+ffi_prep_closure (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data)
+{
+ volatile unsigned short *tramp;
+
+ /* FFI_ASSERT (cif->abi == FFI_OSF); */
+
+ tramp = (volatile unsigned short *) &closure->tramp[0];
+ tramp[0] = 0xbb49; /* mov <code>, %r11 */
+ tramp[5] = 0xba49; /* mov <data>, %r10 */
+ tramp[10] = 0xff49; /* jmp *%r11 */
+ tramp[11] = 0x00e3;
+ *(void * volatile *) &tramp[1] = ffi_closure_UNIX64;
+ *(void * volatile *) &tramp[6] = closure;
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ return FFI_OK;
+}
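
For readability, the 24 bytes assembled above decode to the following instruction sequence (an illustrative template only; the zero bytes stand for the two 8-byte addresses patched in by the stores through tramp[1] and tramp[6]):

/* Hypothetical annotation, not part of this import. */
static const unsigned char unix64_tramp_template[24] = {
  0x49, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0,   /* movabs $ffi_closure_UNIX64, %r11 */
  0x49, 0xba, 0, 0, 0, 0, 0, 0, 0, 0,   /* movabs $closure, %r10            */
  0x49, 0xff, 0xe3, 0x00                /* jmp *%r11; one trailing pad byte */
};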
+
+int
+ffi_closure_UNIX64_inner(ffi_closure *closure, va_list l, void *rp)
+{
+ ffi_cif *cif;
+ void **avalue;
+ ffi_type **arg_types;
+ long i, avn, argn;
+
+ cif = closure->cif;
+ avalue = alloca(cif->nargs * sizeof(void *));
+
+ argn = 0;
+
+ i = 0;
+ avn = cif->nargs;
+ arg_types = cif->arg_types;
+
+ /* Grab the addresses of the arguments from the stack frame. */
+ while (i < avn)
+ {
+ switch (arg_types[i]->type)
+ {
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_POINTER:
+ {
+ if (l->gp_offset > 48-8)
+ {
+ avalue[i] = l->overflow_arg_area;
+ l->overflow_arg_area = (char *)l->overflow_arg_area + 8;
+ }
+ else
+ {
+ avalue[i] = (char *)l->reg_save_area + l->gp_offset;
+ l->gp_offset += 8;
+ }
+ }
+ break;
+
+ case FFI_TYPE_STRUCT:
+ /* FIXME */
+ FFI_ASSERT(0);
+ break;
+
+ case FFI_TYPE_DOUBLE:
+ {
+ if (l->fp_offset > 176-16)
+ {
+ avalue[i] = l->overflow_arg_area;
+ l->overflow_arg_area = (char *)l->overflow_arg_area + 8;
+ }
+ else
+ {
+ avalue[i] = (char *)l->reg_save_area + l->fp_offset;
+ l->fp_offset += 16;
+ }
+ }
+#if DEBUG_FFI
+ fprintf (stderr, "double arg %d = %g\n", i, *(double *)avalue[i]);
+#endif
+ break;
+
+ case FFI_TYPE_FLOAT:
+ {
+ if (l->fp_offset > 176-16)
+ {
+ avalue[i] = l->overflow_arg_area;
+ l->overflow_arg_area = (char *)l->overflow_arg_area + 8;
+ }
+ else
+ {
+ avalue[i] = (char *)l->reg_save_area + l->fp_offset;
+ l->fp_offset += 16;
+ }
+ }
+#if DEBUG_FFI
+ fprintf (stderr, "float arg %d = %g\n", i, *(float *)avalue[i]);
+#endif
+ break;
+
+ default:
+ FFI_ASSERT(0);
+ }
+
+ argn += ALIGN(arg_types[i]->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG;
+ i++;
+ }
+
+ /* Invoke the closure. */
+ (closure->fun) (cif, rp, avalue, closure->user_data);
+
+ /* FIXME: Structs not supported. */
+ FFI_ASSERT(cif->rtype->type != FFI_TYPE_STRUCT);
+
+ /* Tell ffi_closure_UNIX64 how to perform return type promotions. */
+
+ return cif->rtype->type;
+}
+#endif /* ifdef __x86_64__ */
diff --git a/libffi/src/x86/ffi_darwin.c b/libffi/src/x86/ffi_darwin.c
new file mode 100644
index 0000000..bf79c56
--- /dev/null
+++ b/libffi/src/x86/ffi_darwin.c
@@ -0,0 +1,584 @@
+#ifdef __i386__
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 1996, 1998, 1999, 2001 Red Hat, Inc.
+ Copyright (c) 2002 Ranjit Mathew
+ Copyright (c) 2002 Bo Thorsen
+ Copyright (c) 2002 Roger Sayle
+
+ x86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef __x86_64__
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+
+/* ffi_prep_args is called by the assembly routine once stack space
+ has been allocated for the function's arguments */
+
+/*@-exportheader@*/
+void ffi_prep_args(char *stack, extended_cif *ecif)
+/*@=exportheader@*/
+{
+ register unsigned int i;
+ register void **p_argv;
+ register char *argp;
+ register ffi_type **p_arg;
+
+ argp = stack;
+
+ if (ecif->cif->flags == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 8)
+ {
+ *(void **) argp = ecif->rvalue;
+ argp += 4;
+ }
+
+ p_argv = ecif->avalue;
+
+ for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types;
+ i != 0;
+ i--, p_arg++)
+ {
+ size_t z;
+
+ /* Align if necessary */
+ if ((sizeof(int) - 1) & (unsigned) argp)
+ argp = (char *) ALIGN(argp, sizeof(int));
+
+ z = (*p_arg)->size;
+ if (z < sizeof(int))
+ {
+ z = sizeof(int);
+ switch ((*p_arg)->type)
+ {
+ case FFI_TYPE_SINT8:
+ *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_UINT8:
+ *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_SINT16:
+ *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_UINT16:
+ *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_SINT32:
+ *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_UINT32:
+ *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ break;
+
+ default:
+ FFI_ASSERT(0);
+ }
+ }
+ else
+ {
+ memcpy(argp, *p_argv, z);
+ }
+ p_argv++;
+ argp += z;
+ }
+
+ return;
+}
+
+/* Perform machine dependent cif processing */
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ /* Set the return type flag */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+#if !defined(X86_WIN32)
+ case FFI_TYPE_STRUCT:
+#endif
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ cif->flags = (unsigned) cif->rtype->type;
+ break;
+
+ case FFI_TYPE_UINT64:
+ cif->flags = FFI_TYPE_SINT64;
+ break;
+
+#if defined X86_WIN32
+
+ case FFI_TYPE_STRUCT:
+ if (cif->rtype->size == 1)
+ {
+ cif->flags = FFI_TYPE_SINT8; /* same as char size */
+ }
+ else if (cif->rtype->size == 2)
+ {
+ cif->flags = FFI_TYPE_SINT16; /* same as short size */
+ }
+ else if (cif->rtype->size == 4)
+ {
+ cif->flags = FFI_TYPE_INT; /* same as int type */
+ }
+ else if (cif->rtype->size == 8)
+ {
+ cif->flags = FFI_TYPE_SINT64; /* same as int64 type */
+ }
+ else
+ {
+ cif->flags = FFI_TYPE_STRUCT;
+ }
+ break;
+#endif
+
+ default:
+ cif->flags = FFI_TYPE_INT;
+ break;
+ }
+
+ /* Darwin: The stack needs to be aligned to a multiple of 16 bytes */
+ cif->bytes = (cif->bytes + 15) & ~0xF;
+
+ return FFI_OK;
+}
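
A quick stand-alone check of the rounding expression used above: (bytes + 15) & ~0xF rounds any byte count up to the next multiple of 16 (plain C, not from the source):

#include <assert.h>

int main(void)
{
  unsigned int bytes = 20;
  bytes = (bytes + 15) & ~0xF;          /* round up to a 16-byte boundary */
  assert(bytes == 32);
  assert(((16 + 15) & ~0xF) == 16);     /* already-aligned values are unchanged */
  return 0;
}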
+
+/*@-declundef@*/
+/*@-exportheader@*/
+extern void ffi_call_SYSV(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+/*@=declundef@*/
+/*@=exportheader@*/
+
+#ifdef X86_WIN32
+/*@-declundef@*/
+/*@-exportheader@*/
+extern void ffi_call_STDCALL(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+/*@=declundef@*/
+/*@=exportheader@*/
+#endif /* X86_WIN32 */
+
+void ffi_call(/*@dependent@*/ ffi_cif *cif,
+ void (*fn)(),
+ /*@out@*/ void *rvalue,
+ /*@dependent@*/ void **avalue)
+{
+ extended_cif ecif;
+ int flags;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* If the return value is a struct and we don't have a return */
+ /* value address then we need to make one */
+
+ if ((rvalue == NULL) &&
+ (cif->flags == FFI_TYPE_STRUCT) && (cif->rtype->size > 8))
+ {
+ /*@-sysunrecog@*/
+ ecif.rvalue = alloca(cif->rtype->size);
+ /*@=sysunrecog@*/
+ }
+ else
+ ecif.rvalue = rvalue;
+
+ flags = cif->flags;
+ if (flags == FFI_TYPE_STRUCT) {
+ if (cif->rtype->size == 8) {
+ flags = FFI_TYPE_SINT64;
+ } else if (cif->rtype->size < 8) {
+ flags = FFI_TYPE_INT;
+ }
+ }
+
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ /*@-usedef@*/
+ ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes,
+ flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#ifdef X86_WIN32
+ case FFI_STDCALL:
+ /*@-usedef@*/
+ ffi_call_STDCALL(ffi_prep_args, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#endif /* X86_WIN32 */
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
+
+
+/** private members **/
+
+static void ffi_prep_incoming_args_SYSV (char *stack, void **ret,
+ void** args, ffi_cif* cif);
+static void ffi_closure_SYSV (ffi_closure *)
+ __attribute__ ((regparm(1)));
+#if !FFI_NO_RAW_API
+static void ffi_closure_raw_SYSV (ffi_raw_closure *)
+ __attribute__ ((regparm(1)));
+#endif
+
+/* This function is jumped to by the trampoline */
+
+static void
+ffi_closure_SYSV (closure)
+ ffi_closure *closure;
+{
+ // this is our return value storage
+ long double res;
+
+ // our various things...
+ ffi_cif *cif;
+ void **arg_area;
+ unsigned short rtype;
+ void *resp = (void*)&res;
+ void *args = __builtin_dwarf_cfa ();
+
+ cif = closure->cif;
+ arg_area = (void**) alloca (cif->nargs * sizeof (void*));
+
+ /* this call will initialize ARG_AREA, such that each
+ * element in that array points to the corresponding
+ * value on the stack; and if the function returns
+ * a structure, it will re-set RESP to point to the
+ * structure return address. */
+
+ ffi_prep_incoming_args_SYSV(args, (void**)&resp, arg_area, cif);
+
+ (closure->fun) (cif, resp, arg_area, closure->user_data);
+
+ rtype = cif->flags;
+
+ if (rtype == FFI_TYPE_STRUCT && cif->rtype->size <= 8) {
+ if (cif->rtype->size == 8) {
+ rtype = FFI_TYPE_SINT64;
+ } else {
+ rtype = FFI_TYPE_INT;
+ }
+ }
+
+ /* now, do a generic return based on the value of rtype */
+ if (rtype == FFI_TYPE_INT)
+ {
+ asm ("movl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+ else if (rtype == FFI_TYPE_FLOAT)
+ {
+ asm ("flds (%0)" : : "r" (resp) : "st" );
+ }
+ else if (rtype == FFI_TYPE_DOUBLE)
+ {
+ asm ("fldl (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_LONGDOUBLE)
+ {
+ asm ("fldt (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_SINT64)
+ {
+ asm ("movl 0(%0),%%eax;"
+ "movl 4(%0),%%edx"
+ : : "r"(resp)
+ : "eax", "edx");
+ }
+#ifdef X86_WIN32
+ else if (rtype == FFI_TYPE_SINT8) /* 1-byte struct */
+ {
+ asm ("movsbl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+ else if (rtype == FFI_TYPE_SINT16) /* 2-byte struct */
+ {
+ asm ("movswl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+#endif
+}
+
+/*@-exportheader@*/
+static void
+ffi_prep_incoming_args_SYSV(char *stack, void **rvalue,
+ void **avalue, ffi_cif *cif)
+/*@=exportheader@*/
+{
+ register unsigned int i;
+ register void **p_argv;
+ register char *argp;
+ register ffi_type **p_arg;
+
+ argp = stack;
+
+ if ( cif->flags == FFI_TYPE_STRUCT && (cif->rtype->size > 8)) {
+ *rvalue = *(void **) argp;
+ argp += 4;
+ }
+
+ p_argv = avalue;
+
+ for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++)
+ {
+ size_t z;
+
+ /* Align if necessary */
+ if ((sizeof(int) - 1) & (unsigned) argp) {
+ argp = (char *) ALIGN(argp, sizeof(int));
+ }
+
+ z = (*p_arg)->size;
+
+ /* because we're little endian, this is what it turns into. */
+
+ *p_argv = (void*) argp;
+
+ p_argv++;
+ argp += z;
+ }
+
+ return;
+}
+
+/* How to make a trampoline. Derived from gcc/config/i386/i386.c. */
+
+#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX) \
+({ unsigned char *__tramp = (unsigned char*)(TRAMP); \
+ unsigned int __fun = (unsigned int)(FUN); \
+ unsigned int __ctx = (unsigned int)(CTX); \
+ unsigned int __dis = __fun - ((unsigned int) __tramp + FFI_TRAMPOLINE_SIZE); \
+ *(unsigned char*) &__tramp[0] = 0xb8; \
+ *(unsigned int*) &__tramp[1] = __ctx; /* movl __ctx, %eax */ \
+ *(unsigned char *) &__tramp[5] = 0xe9; \
+ *(unsigned int*) &__tramp[6] = __dis; /* jmp __fun */ \
+ })
+
+
+/* the cif must already be prep'ed */
+
+ffi_status
+ffi_prep_closure (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data)
+{
+ FFI_ASSERT (cif->abi == FFI_SYSV);
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], \
+ &ffi_closure_SYSV, \
+ (void*)closure);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+/* ------- Native raw API support -------------------------------- */
+
+#if !FFI_NO_RAW_API
+
+static void
+ffi_closure_raw_SYSV (closure)
+ ffi_raw_closure *closure;
+{
+ // this is our return value storage
+ long double res;
+
+ // our various things...
+ ffi_raw *raw_args;
+ ffi_cif *cif;
+ unsigned short rtype;
+ void *resp = (void*)&res;
+
+ /* get the cif */
+ cif = closure->cif;
+
+ /* the SYSV/X86 abi matches the RAW API exactly, well.. almost */
+ raw_args = (ffi_raw*) __builtin_dwarf_cfa ();
+
+ (closure->fun) (cif, resp, raw_args, closure->user_data);
+
+ rtype = cif->flags;
+
+ /* now, do a generic return based on the value of rtype */
+ if (rtype == FFI_TYPE_INT)
+ {
+ asm ("movl (%0),%%eax" : : "r" (resp) : "eax");
+ }
+ else if (rtype == FFI_TYPE_FLOAT)
+ {
+ asm ("flds (%0)" : : "r" (resp) : "st" );
+ }
+ else if (rtype == FFI_TYPE_DOUBLE)
+ {
+ asm ("fldl (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_LONGDOUBLE)
+ {
+ asm ("fldt (%0)" : : "r" (resp) : "st", "st(1)" );
+ }
+ else if (rtype == FFI_TYPE_SINT64)
+ {
+ asm ("movl 0(%0),%%eax; movl 4(%0),%%edx"
+ : : "r"(resp)
+ : "eax", "edx");
+ }
+}
+
+
+
+
+ffi_status
+ffi_prep_raw_closure (ffi_raw_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,ffi_raw*,void*),
+ void *user_data)
+{
+ int i;
+
+ FFI_ASSERT (cif->abi == FFI_SYSV);
+
+ // we currently don't support certain kinds of arguments for raw
+ // closures. This should be implemented by a separate assembly language
+ // routine, since it would require argument processing, something we
+ // don't do now for performance.
+
+ for (i = cif->nargs-1; i >= 0; i--)
+ {
+ FFI_ASSERT (cif->arg_types[i]->type != FFI_TYPE_STRUCT);
+ FFI_ASSERT (cif->arg_types[i]->type != FFI_TYPE_LONGDOUBLE);
+ }
+
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_raw_SYSV,
+ (void*)closure);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+static void
+ffi_prep_args_raw(char *stack, extended_cif *ecif)
+{
+ memcpy (stack, ecif->avalue, ecif->cif->bytes);
+}
+
+/* we borrow this routine from libffi (it must be changed, though, to
+ * actually call the function passed in the first argument. as of
+ * libffi-1.20, this is not the case.)
+ */
+
+extern void
+ffi_call_SYSV(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+
+#ifdef X86_WIN32
+extern void
+ffi_call_STDCALL(void (*)(char *, extended_cif *),
+ /*@out@*/ extended_cif *,
+ unsigned, unsigned,
+ /*@out@*/ unsigned *,
+ void (*fn)());
+#endif /* X86_WIN32 */
+
+void
+ffi_raw_call(/*@dependent@*/ ffi_cif *cif,
+ void (*fn)(),
+ /*@out@*/ void *rvalue,
+ /*@dependent@*/ ffi_raw *fake_avalue)
+{
+ extended_cif ecif;
+ void **avalue = (void **)fake_avalue;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* If the return value is a struct and we don't have a return */
+ /* value address then we need to make one */
+
+ if ((rvalue == NULL) &&
+ (cif->rtype->type == FFI_TYPE_STRUCT) && (cif->rtype->size > 8))
+ {
+ /*@-sysunrecog@*/
+ ecif.rvalue = alloca(cif->rtype->size);
+ /*@=sysunrecog@*/
+ }
+ else
+ ecif.rvalue = rvalue;
+
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ /*@-usedef@*/
+ ffi_call_SYSV(ffi_prep_args_raw, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#ifdef X86_WIN32
+ case FFI_STDCALL:
+ /*@-usedef@*/
+ ffi_call_STDCALL(ffi_prep_args_raw, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
+ /*@=usedef@*/
+ break;
+#endif /* X86_WIN32 */
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
+
+#endif /* !FFI_NO_RAW_API */
+
+#endif /* ifndef __x86_64__ */
+
+#endif /* __i386__ */
diff --git a/libffi/src/x86/ffitarget.h b/libffi/src/x86/ffitarget.h
new file mode 100644
index 0000000..9500f40
--- /dev/null
+++ b/libffi/src/x86/ffitarget.h
@@ -0,0 +1,81 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ Target configuration macros for x86 and x86-64.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+/* ---- System specific configurations ----------------------------------- */
+
+#if defined (X86_64) && defined (__i386__)
+#undef X86_64
+#define X86
+#endif
+
+/* ---- Generic type definitions ----------------------------------------- */
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+
+ /* ---- Intel x86 Win32 ---------- */
+#ifdef X86_WIN32
+ FFI_SYSV,
+ FFI_STDCALL,
+ /* TODO: Add fastcall support for the sake of completeness */
+ FFI_DEFAULT_ABI = FFI_SYSV,
+#endif
+
+ /* ---- Intel x86 and AMD x86-64 - */
+#if !defined(X86_WIN32) && (defined(__i386__) || defined(__x86_64__))
+ FFI_SYSV,
+ FFI_UNIX64, /* Unix variants all use the same ABI for x86-64 */
+#ifdef __i386__
+ FFI_DEFAULT_ABI = FFI_SYSV,
+#else
+ FFI_DEFAULT_ABI = FFI_UNIX64,
+#endif
+#endif
+
+ FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+
+#ifdef X86_64
+#define FFI_TRAMPOLINE_SIZE 24
+#define FFI_NATIVE_RAW_API 0
+#else
+#define FFI_TRAMPOLINE_SIZE 10
+#define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */
+#endif
+
+#endif
+
diff --git a/libffi/src/x86/sysv.S b/libffi/src/x86/sysv.S
new file mode 100644
index 0000000..53a4c2b
--- /dev/null
+++ b/libffi/src/x86/sysv.S
@@ -0,0 +1,184 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 1996, 1998, 2001, 2002, 2003 Red Hat, Inc.
+
+ X86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef __x86_64__
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+.text
+
+.globl ffi_prep_args
+
+ .align 4
+.globl ffi_call_SYSV
+ .type ffi_call_SYSV,@function
+
+ffi_call_SYSV:
+.LFB1:
+ pushl %ebp
+.LCFI0:
+ movl %esp,%ebp
+.LCFI1:
+ /* Make room for all of the new args. */
+ movl 16(%ebp),%ecx
+ subl %ecx,%esp
+
+ movl %esp,%eax
+
+ /* Set up the arguments to ffi_prep_args and call it */
+ pushl 12(%ebp)
+ pushl %eax
+ call *8(%ebp)
+
+ /* Return stack to previous state and call the function */
+ addl $8,%esp
+
+ call *28(%ebp)
+
+ /* Remove the space we pushed for the args */
+ movl 16(%ebp),%ecx
+ addl %ecx,%esp
+
+ /* Load %ecx with the return type code */
+ movl 20(%ebp),%ecx
+
+ /* If the return value pointer is NULL, assume no return value. */
+ cmpl $0,24(%ebp)
+ jne retint
+
+ /* Even if there is no space for the return value, we are
+ obliged to handle floating-point values. */
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne noretval
+ fstp %st(0)
+
+ jmp epilogue
+
+retint:
+ cmpl $FFI_TYPE_INT,%ecx
+ jne retfloat
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ jmp epilogue
+
+retfloat:
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne retdouble
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ fstps (%ecx)
+ jmp epilogue
+
+retdouble:
+ cmpl $FFI_TYPE_DOUBLE,%ecx
+ jne retlongdouble
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ fstpl (%ecx)
+ jmp epilogue
+
+retlongdouble:
+ cmpl $FFI_TYPE_LONGDOUBLE,%ecx
+ jne retint64
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ fstpt (%ecx)
+ jmp epilogue
+
+retint64:
+ cmpl $FFI_TYPE_SINT64,%ecx
+ jne retstruct
+ /* Load %ecx with the pointer to storage for the return value */
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ movl %edx,4(%ecx)
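+ /* Fall through: retstruct and noretval need no work, so execution
+    continues straight into the epilogue. */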
+
+retstruct:
+ /* Nothing to do! */
+
+noretval:
+epilogue:
+ movl %ebp,%esp
+ popl %ebp
+ ret
+.LFE1:
+.ffi_call_SYSV_end:
+ .size ffi_call_SYSV,.ffi_call_SYSV_end-ffi_call_SYSV
+
+ .section .eh_frame,EH_FRAME_FLAGS,@progbits
+.Lframe1:
+ .long .LECIE1-.LSCIE1 /* Length of Common Information Entry */
+.LSCIE1:
+ .long 0x0 /* CIE Identifier Tag */
+ .byte 0x1 /* CIE Version */
+#ifdef __PIC__
+ .ascii "zR\0" /* CIE Augmentation */
+#else
+ .ascii "\0" /* CIE Augmentation */
+#endif
+ .byte 0x1 /* .uleb128 0x1; CIE Code Alignment Factor */
+ .byte 0x7c /* .sleb128 -4; CIE Data Alignment Factor */
+ .byte 0x8 /* CIE RA Column */
+#ifdef __PIC__
+ .byte 0x1 /* .uleb128 0x1; Augmentation size */
+ .byte 0x1b /* FDE Encoding (pcrel sdata4) */
+#endif
+ .byte 0xc /* DW_CFA_def_cfa */
+ .byte 0x4 /* .uleb128 0x4 */
+ .byte 0x4 /* .uleb128 0x4 */
+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
+ .byte 0x1 /* .uleb128 0x1 */
+ .align 4
+.LECIE1:
+.LSFDE1:
+ .long .LEFDE1-.LASFDE1 /* FDE Length */
+.LASFDE1:
+ .long .LASFDE1-.Lframe1 /* FDE CIE offset */
+#ifdef __PIC__
+ .long .LFB1-. /* FDE initial location */
+#else
+ .long .LFB1 /* FDE initial location */
+#endif
+ .long .LFE1-.LFB1 /* FDE address range */
+#ifdef __PIC__
+ .byte 0x0 /* .uleb128 0x0; Augmentation size */
+#endif
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .long .LCFI0-.LFB1
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .byte 0x8 /* .uleb128 0x8 */
+ .byte 0x85 /* DW_CFA_offset, column 0x5 */
+ .byte 0x2 /* .uleb128 0x2 */
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .long .LCFI1-.LCFI0
+ .byte 0xd /* DW_CFA_def_cfa_register */
+ .byte 0x5 /* .uleb128 0x5 */
+ .align 4
+.LEFDE1:
+
+#endif /* ifndef __x86_64__ */
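[Editor's note] ffi_call_SYSV above is the assembly back end that ffi_call() reaches on 32-bit SysV targets; judging by the frame offsets it uses (8, 12, 16, 20, 24, 28), ffi.c hands it the ffi_prep_args callback, the extended cif, cif->bytes, the return-type code, the return-value pointer and the target function, and the stub then dispatches on the return type exactly as the retint/retfloat/... labels show. A minimal caller through the public API, assuming only the documented ffi_prep_cif()/ffi_call() interface and using puts() purely as an example target:

    #include <ffi.h>
    #include <stdio.h>

    int main(void)
    {
        ffi_cif cif;
        ffi_type *arg_types[1] = { &ffi_type_pointer };
        char *s = "hello from ffi_call";
        void *arg_values[1] = { &s };
        ffi_arg rc;   /* integral returns are widened to ffi_arg */

        /* Describe int puts(const char *): one pointer in, int out. */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK)
            ffi_call(&cif, (void (*)(void)) puts, &rc, arg_values);

        return 0;
    }
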
diff --git a/libffi/src/x86/unix64.S b/libffi/src/x86/unix64.S
new file mode 100644
index 0000000..310fed7
--- /dev/null
+++ b/libffi/src/x86/unix64.S
@@ -0,0 +1,303 @@
+/* -----------------------------------------------------------------------
+ unix64.S - Copyright (c) 2002 Bo Thorsen <bo@suse.de>
+
+ x86-64 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifdef __x86_64__
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+ .section .rodata
+.LC0:
+ .string "asm in progress %lld\n"
+.LC1:
+ .string "asm in progress\n"
+.text
+ .align 2
+.globl ffi_call_UNIX64
+ .type ffi_call_UNIX64,@function
+
+ffi_call_UNIX64:
+.LFB1:
+ pushq %rbp
+.LCFI0:
+ movq %rsp, %rbp
+.LCFI1:
+ /* Save all arguments */
+ subq $48, %rsp
+.LCFI2:
+ movq %rdi, -8(%rbp) /* ffi_prep_args */
+ movq %rsi, -16(%rbp) /* ffi_fill_return_value */
+ movq %rdx, -24(%rbp) /* ecif */
+ movq %rcx, -32(%rbp) /* cif->bytes */
+ movq %r8, -40(%rbp) /* ecif.rvalue */
+ movq %r9, -48(%rbp) /* fn */
+
+ /* Make room for all of the new args and the register args */
+ addl $176, %ecx
+.LCFI3:
+ subq %rcx, %rsp
+.LCFI4:
+ /* Setup the call to ffi_prep_args. */
+ movq %rdi, %rax /* &ffi_prep_args */
+ movq %rsp, %rdi /* stackLayout */
+ movq %rdx, %rsi /* ecif */
+ call *%rax /* ffi_prep_args(stackLayout, ecif);*/
+
+ /* ffi_prep_args has put all of the register arguments into the */
+ /* stackLayout struct.  Now load them into the actual registers. */
+ movq (%rsp), %rdi
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdx
+ movq 24(%rsp), %rcx
+ movq 32(%rsp), %r8
+ movq 40(%rsp), %r9
+ movaps 48(%rsp), %xmm0
+ movaps 64(%rsp), %xmm1
+ movaps 80(%rsp), %xmm2
+ movaps 96(%rsp), %xmm3
+ movaps 112(%rsp), %xmm4
+ movaps 128(%rsp), %xmm5
+ movaps 144(%rsp), %xmm6
+ movaps 160(%rsp), %xmm7
+
+ /* Pop the stackLayout register area so the stack-passed arguments
+    sit at %rsp, exactly where the callee expects them. */
+.LCFI5:
+ addq $176, %rsp
+.LCFI6:
+ /* Call the user function. */
+ call *-48(%rbp)
+
+ /* Make stack space for the return_value struct. */
+ subq $64, %rsp
+
+ /* Store every register that could hold part of the return value. */
+ movq %rax, (%rsp)
+ movq %rdx, 8(%rsp)
+ movaps %xmm0, 16(%rsp)
+ movaps %xmm1, 32(%rsp)
+ fstpt 48(%rsp)
+
+ /* Now call ffi_fill_return_value. */
+ movq %rsp, %rdi /* struct return_value */
+ movq -24(%rbp), %rsi /* ecif */
+ movq -16(%rbp), %rax /* &ffi_fill_return_value */
+ call *%rax /* call it */
+
+ /* And the work is done. */
+ leave
+ ret
+.LFE1:
+.ffi_call_UNIX64_end:
+ .size ffi_call_UNIX64,.ffi_call_UNIX64_end-ffi_call_UNIX64
+
+.text
+ .align 2
+.globl float2sse
+ .type float2sse,@function
+float2sse:
+ /* Save the contents of this sse-float in a pointer. */
+ movaps %xmm0, (%rdi)
+ ret
+
+ .align 2
+.globl floatfloat2sse
+ .type floatfloat2sse,@function
+floatfloat2sse:
+ /* Copy two packed floats from (%rdi) into %xmm0, then store the register at (%rsi). */
+ movq (%rdi), %xmm0
+ movaps %xmm0, (%rsi)
+ ret
+
+ .align 2
+.globl double2sse
+ .type double2sse,@function
+double2sse:
+ /* Save the contents of this sse-double in a pointer. */
+ movaps %xmm0, (%rdi)
+ ret
+
+ .align 2
+.globl sse2float
+ .type sse2float,@function
+sse2float:
+ /* Load the sse-float at this pointer into %xmm0. */
+ movaps (%rdi), %xmm0
+ ret
+
+ .align 2
+.globl sse2double
+ .type sse2double,@function
+sse2double:
+ /* Load the sse-double at this pointer into %xmm0. */
+ movaps (%rdi), %xmm0
+ ret
+
+ .align 2
+.globl sse2floatfloat
+ .type sse2floatfloat,@function
+sse2floatfloat:
+ /* Load %xmm0 from (%rdi) and store its low two floats to (%rsi). */
+ movaps (%rdi), %xmm0
+ movq %xmm0, (%rsi)
+ ret
+
+ .align 2
+.globl ffi_closure_UNIX64
+ .type ffi_closure_UNIX64,@function
+
+ffi_closure_UNIX64:
+.LFB2:
+ pushq %rbp
+.LCFI10:
+ movq %rsp, %rbp
+.LCFI11:
+ subq $240, %rsp
+.LCFI12:
+ movq %rdi, -176(%rbp)
+ movq %rsi, -168(%rbp)
+ movq %rdx, -160(%rbp)
+ movq %rcx, -152(%rbp)
+ movq %r8, -144(%rbp)
+ movq %r9, -136(%rbp)
+ /* FIXME: We can avoid all this stashing of XMM registers by
+ (in ffi_prep_closure) computing the number of
+ floating-point args and moving it into %rax before calling
+ this function. Once this is done, uncomment the next few
+ lines and only the essential XMM registers will be written
+ to memory. This is a significant saving. */
+/* movzbl %al, %eax */
+/* movq %rax, %rdx */
+/* leaq 0(,%rdx,4), %rax */
+/* leaq 2f(%rip), %rdx */
+/* subq %rax, %rdx */
+ leaq -1(%rbp), %rax
+/* jmp *%rdx */
+ movaps %xmm7, -15(%rax)
+ movaps %xmm6, -31(%rax)
+ movaps %xmm5, -47(%rax)
+ movaps %xmm4, -63(%rax)
+ movaps %xmm3, -79(%rax)
+ movaps %xmm2, -95(%rax)
+ movaps %xmm1, -111(%rax)
+ movaps %xmm0, -127(%rax)
+2:
+ movl %edi, -180(%rbp)
+ movl $0, -224(%rbp)
+ movl $48, -220(%rbp)
+ leaq 16(%rbp), %rax
+ movq %rax, -216(%rbp)
+ leaq -176(%rbp), %rdx
+ movq %rdx, -208(%rbp)
+ leaq -224(%rbp), %rsi
+ movq %r10, %rdi
+ movq %rsp, %rdx
+ call ffi_closure_UNIX64_inner@PLT
+
+ cmpl $FFI_TYPE_FLOAT, %eax
+ je 1f
+ cmpl $FFI_TYPE_DOUBLE, %eax
+ je 2f
+ cmpl $FFI_TYPE_LONGDOUBLE, %eax
+ je 3f
+ cmpl $FFI_TYPE_STRUCT, %eax
+ je 4f
+ popq %rax
+ leave
+ ret
+1:
+2:
+3:
+ movaps -240(%rbp), %xmm0
+ leave
+ ret
+4:
+ leave
+ ret
+.LFE2:
+
+ .section .eh_frame,EH_FRAME_FLAGS,@progbits
+.Lframe0:
+ .long .LECIE1-.LSCIE1
+.LSCIE1:
+ .long 0x0
+ .byte 0x1
+ .string "zR"
+ .uleb128 0x1
+ .sleb128 -8
+ .byte 0x10
+ .uleb128 0x1
+ .byte 0x1b
+ .byte 0xc
+ .uleb128 0x7
+ .uleb128 0x8
+ .byte 0x90
+ .uleb128 0x1
+ .align 8
+.LECIE1:
+.LSFDE1:
+ .long .LEFDE1-.LASFDE1
+.LASFDE1:
+ .long .LASFDE1-.Lframe0
+
+ .long .LFB1-.
+ .long .LFE1-.LFB1
+ .uleb128 0x0
+ .byte 0x4 # DW_CFA_advance_loc4
+ .long .LCFI0-.LFB1
+ .byte 0xe # DW_CFA_def_cfa_offset
+ .uleb128 0x10
+ .byte 0x86 # DW_CFA_offset: r6 at cfa-16
+ .uleb128 0x2
+ .byte 0x4 # DW_CFA_advance_loc4
+ .long .LCFI1-.LCFI0
+ .byte 0x86 # DW_CFA_offset: r6 at cfa-16
+ .uleb128 0x2
+ .byte 0xd # DW_CFA_def_cfa_reg: r6
+ .uleb128 0x6
+ .align 8
+.LEFDE1:
+.LSFDE3:
+ .long .LEFDE3-.LASFDE3 # FDE Length
+.LASFDE3:
+ .long .LASFDE3-.Lframe0 # FDE CIE offset
+
+ .long .LFB2-. # FDE initial location
+ .long .LFE2-.LFB2 # FDE address range
+ .uleb128 0x0 # Augmentation size
+ .byte 0x4 # DW_CFA_advance_loc4
+ .long .LCFI10-.LFB2
+ .byte 0xe # DW_CFA_def_cfa_offset
+ .uleb128 0x10
+ .byte 0x86 # DW_CFA_offset, column 0x6
+ .uleb128 0x2
+ .byte 0x4 # DW_CFA_advance_loc4
+ .long .LCFI11-.LCFI10
+ .byte 0xd # DW_CFA_def_cfa_register
+ .uleb128 0x6
+ .align 8
+.LEFDE3:
+
+#endif /* __x86_64__ */
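[Editor's note] The six movq saves at the top of ffi_call_UNIX64 spell out its C-side contract: under the SysV AMD64 convention the first six integer arguments arrive in %rdi, %rsi, %rdx, %rcx, %r8 and %r9, so the stub receives the prep-args callback, the fill-return-value callback, the ecif, cif->bytes, the return-value pointer and the target function, in that order. A hedged sketch of the matching declaration follows; the authoritative prototype lives in ffi64.c from this same import, and the struct and parameter names here are illustrative only:

    /* Illustrative only -- see ffi64.c for the real declaration. */
    struct stackLayout;     /* register + stack argument image built by ffi_prep_args   */
    struct return_value;    /* rax/rdx/xmm0/xmm1/x87 snapshot filled in after the call  */
    typedef struct extended_cif extended_cif;

    extern void ffi_call_UNIX64(
        void (*prep_args)(struct stackLayout *, extended_cif *),          /* -> %rdi */
        void (*fill_return_value)(struct return_value *, extended_cif *), /* -> %rsi */
        extended_cif *ecif,                                               /* -> %rdx */
        unsigned long bytes,                                              /* -> %rcx */
        void *rvalue,                                                     /* -> %r8  */
        void (*fn)(void));                                                /* -> %r9  */
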
diff --git a/libffi/src/x86/win32.S b/libffi/src/x86/win32.S
new file mode 100644
index 0000000..bc2812c
--- /dev/null
+++ b/libffi/src/x86/win32.S
@@ -0,0 +1,259 @@
+/* -----------------------------------------------------------------------
+ win32.S - Copyright (c) 1996, 1998, 2001, 2002 Red Hat, Inc.
+ Copyright (c) 2001 John Beniton
+ Copyright (c) 2002 Ranjit Mathew
+
+
+ X86 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+.text
+
+.globl ffi_prep_args
+
+ # This assumes we are using gas.
+ .balign 16
+.globl _ffi_call_SYSV
+
+_ffi_call_SYSV:
+ pushl %ebp
+ movl %esp,%ebp
+
+ # Make room for all of the new args.
+ movl 16(%ebp),%ecx
+ subl %ecx,%esp
+
+ movl %esp,%eax
+
+ # Push the arguments for ffi_prep_args and call it via 8(%ebp)
+ pushl 12(%ebp)
+ pushl %eax
+ call *8(%ebp)
+
+ # Return stack to previous state and call the function
+ addl $8,%esp
+
+ # FIXME: Align the stack to a 128-bit boundary to avoid
+ # potential performance hits.
+
+ call *28(%ebp)
+
+ # Remove the space we pushed for the args
+ movl 16(%ebp),%ecx
+ addl %ecx,%esp
+
+ # Load %ecx with the return type code
+ movl 20(%ebp),%ecx
+
+ # If the return value pointer is NULL, assume no return value.
+ cmpl $0,24(%ebp)
+ jne retint
+
+ # Even when there is nowhere to store the return value, a
+ # floating-point result must still be popped off the x87 stack.
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne noretval
+ fstp %st(0)
+
+ jmp epilogue
+
+retint:
+ cmpl $FFI_TYPE_INT,%ecx
+ jne retfloat
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ jmp epilogue
+
+retfloat:
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne retdouble
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ fstps (%ecx)
+ jmp epilogue
+
+retdouble:
+ cmpl $FFI_TYPE_DOUBLE,%ecx
+ jne retlongdouble
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ fstpl (%ecx)
+ jmp epilogue
+
+retlongdouble:
+ cmpl $FFI_TYPE_LONGDOUBLE,%ecx
+ jne retint64
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ fstpt (%ecx)
+ jmp epilogue
+
+retint64:
+ cmpl $FFI_TYPE_SINT64,%ecx
+ jne retstruct1b
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ movl %edx,4(%ecx)
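+ # No jump needed: the SINT8 and SINT16 tests below both fail for
+ # SINT64, so control falls through to the epilogue.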
+
+retstruct1b:
+ cmpl $FFI_TYPE_SINT8,%ecx
+ jne retstruct2b
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movb %al,0(%ecx)
+ jmp epilogue
+
+retstruct2b:
+ cmpl $FFI_TYPE_SINT16,%ecx
+ jne retstruct
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movw %ax,0(%ecx)
+ jmp epilogue
+
+retstruct:
+ # Nothing to do!
+
+noretval:
+epilogue:
+ movl %ebp,%esp
+ popl %ebp
+ ret
+
+.ffi_call_SYSV_end:
+
+ # This assumes we are using gas.
+ .balign 16
+.globl _ffi_call_STDCALL
+
+_ffi_call_STDCALL:
+ pushl %ebp
+ movl %esp,%ebp
+
+ # Make room for all of the new args.
+ movl 16(%ebp),%ecx
+ subl %ecx,%esp
+
+ movl %esp,%eax
+
+ # Push the arguments for ffi_prep_args and call it via 8(%ebp)
+ pushl 12(%ebp)
+ pushl %eax
+ call *8(%ebp)
+
+ # Return stack to previous state and call the function
+ addl $8,%esp
+
+ # FIXME: Align the stack to a 128-bit boundary to avoid
+ # potential performance hits.
+
+ call *28(%ebp)
+
+ # stdcall functions pop arguments off the stack themselves
+
+ # Load %ecx with the return type code
+ movl 20(%ebp),%ecx
+
+ # If the return value pointer is NULL, assume no return value.
+ cmpl $0,24(%ebp)
+ jne sc_retint
+
+ # Even when there is nowhere to store the return value, a
+ # floating-point result must still be popped off the x87 stack.
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne sc_noretval
+ fstp %st(0)
+
+ jmp sc_epilogue
+
+sc_retint:
+ cmpl $FFI_TYPE_INT,%ecx
+ jne sc_retfloat
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ jmp sc_epilogue
+
+sc_retfloat:
+ cmpl $FFI_TYPE_FLOAT,%ecx
+ jne sc_retdouble
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ fstps (%ecx)
+ jmp sc_epilogue
+
+sc_retdouble:
+ cmpl $FFI_TYPE_DOUBLE,%ecx
+ jne sc_retlongdouble
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ fstpl (%ecx)
+ jmp sc_epilogue
+
+sc_retlongdouble:
+ cmpl $FFI_TYPE_LONGDOUBLE,%ecx
+ jne sc_retint64
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ fstpt (%ecx)
+ jmp sc_epilogue
+
+sc_retint64:
+ cmpl $FFI_TYPE_SINT64,%ecx
+ jne sc_retstruct1b
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movl %eax,0(%ecx)
+ movl %edx,4(%ecx)
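+ # No jump needed: the SINT8 and SINT16 tests below both fail for
+ # SINT64, so control falls through to the epilogue.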
+
+sc_retstruct1b:
+ cmpl $FFI_TYPE_SINT8,%ecx
+ jne sc_retstruct2b
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movb %al,0(%ecx)
+ jmp sc_epilogue
+
+sc_retstruct2b:
+ cmpl $FFI_TYPE_SINT16,%ecx
+ jne sc_retstruct
+ # Load %ecx with the pointer to storage for the return value
+ movl 24(%ebp),%ecx
+ movw %ax,0(%ecx)
+ jmp sc_epilogue
+
+sc_retstruct:
+ # Nothing to do!
+
+sc_noretval:
+sc_epilogue:
+ movl %ebp,%esp
+ popl %ebp
+ ret
+
+.ffi_call_STDCALL_end:
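
[Editor's note] The only substantive difference between _ffi_call_STDCALL and _ffi_call_SYSV above is the missing "addl %ecx,%esp" after the call: under stdcall the callee pops its own argument bytes, so the caller must not pop them again. For reference, this is the convention that compiler-specific keywords select on 32-bit Windows (a sketch; __cdecl and __stdcall are MSVC/MinGW keywords, not part of libffi):

    /* Caller cleans the stack: matches the _ffi_call_SYSV path. */
    int __cdecl   add_cdecl(int a, int b);

    /* Callee cleans the stack: matches the _ffi_call_STDCALL path,
       which is why that stub skips the addl after the call. */
    int __stdcall add_stdcall(int a, int b);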