Diffstat (limited to 'libffi-3.0.4/src/ia64')
-rw-r--r--  libffi-3.0.4/src/ia64/ffi.c         580
-rw-r--r--  libffi-3.0.4/src/ia64/ffitarget.h    50
-rw-r--r--  libffi-3.0.4/src/ia64/ia64_flags.h   40
-rw-r--r--  libffi-3.0.4/src/ia64/unix.S        556
4 files changed, 1226 insertions, 0 deletions
diff --git a/libffi-3.0.4/src/ia64/ffi.c b/libffi-3.0.4/src/ia64/ffi.c
new file mode 100644
index 0000000..77e8631
--- /dev/null
+++ b/libffi-3.0.4/src/ia64/ffi.c
@@ -0,0 +1,580 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 1998, 2007 Red Hat, Inc.
+ Copyright (c) 2000 Hewlett Packard Company
+
+ IA64 Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <float.h>
+
+#include "ia64_flags.h"
+
+/* A 64-bit pointer value. In LP64 mode, this is effectively a plain
+ pointer. In ILP32 mode, it's a pointer that's been extended to
+ 64 bits by "addp4". */
+typedef void *PTR64 __attribute__((mode(DI)));
+
+/* Memory image of fp register contents. This is the implementation
+ specific format used by ldf.fill/stf.spill. All we care about is
+ that it wants a 16 byte aligned slot. */
+typedef struct
+{
+ UINT64 x[2] __attribute__((aligned(16)));
+} fpreg;
+
+
+/* The stack layout given to ffi_call_unix and ffi_closure_unix_inner. */
+
+struct ia64_args
+{
+ fpreg fp_regs[8]; /* Contents of 8 fp arg registers. */
+ UINT64 gp_regs[8]; /* Contents of 8 gp arg registers. */
+ UINT64 other_args[]; /* Arguments passed on stack, variable size. */
+};
+
+
+/* Adjust ADDR, a pointer to an 8 byte slot, to point to the low LEN bytes. */
+
+static inline void *
+endian_adjust (void *addr, size_t len)
+{
+#ifdef __BIG_ENDIAN__
+ return addr + (8 - len);
+#else
+ return addr;
+#endif
+}
+
+/* Store VALUE to ADDR in the current cpu implementation's fp spill format.
+ This is a macro instead of a function, so that it works for all 3 floating
+ point types without type conversions. Type conversion to long double breaks
+ the denorm support. */
+
+#define stf_spill(addr, value) \
+ asm ("stf.spill %0 = %1%P0" : "=m" (*addr) : "f"(value));
+
+/* Load a value from ADDR, which is in the current cpu implementation's
+ fp spill format. As above, this must also be a macro. */
+
+#define ldf_fill(result, addr) \
+ asm ("ldf.fill %0 = %1%P1" : "=f"(result) : "m"(*addr));
+
+/* Return the size of the C type associated with TYPE, which will
+ be one of the FFI_IA64_TYPE_HFA_* values. */
+
+static size_t
+hfa_type_size (int type)
+{
+ switch (type)
+ {
+ case FFI_IA64_TYPE_HFA_FLOAT:
+ return sizeof(float);
+ case FFI_IA64_TYPE_HFA_DOUBLE:
+ return sizeof(double);
+ case FFI_IA64_TYPE_HFA_LDOUBLE:
+ return sizeof(__float80);
+ default:
+ abort ();
+ }
+}
+
+/* Load from ADDR a value indicated by TYPE, which will be one of
+ the FFI_IA64_TYPE_HFA_* values. */
+
+static void
+hfa_type_load (fpreg *fpaddr, int type, void *addr)
+{
+ switch (type)
+ {
+ case FFI_IA64_TYPE_HFA_FLOAT:
+ stf_spill (fpaddr, *(float *) addr);
+ return;
+ case FFI_IA64_TYPE_HFA_DOUBLE:
+ stf_spill (fpaddr, *(double *) addr);
+ return;
+ case FFI_IA64_TYPE_HFA_LDOUBLE:
+ stf_spill (fpaddr, *(__float80 *) addr);
+ return;
+ default:
+ abort ();
+ }
+}
+
+/* Store into ADDR the value held at FPADDR, as indicated by TYPE, which will be one of
+ the FFI_IA64_TYPE_HFA_* values. */
+
+static void
+hfa_type_store (int type, void *addr, fpreg *fpaddr)
+{
+ switch (type)
+ {
+ case FFI_IA64_TYPE_HFA_FLOAT:
+ {
+ float result;
+ ldf_fill (result, fpaddr);
+ *(float *) addr = result;
+ break;
+ }
+ case FFI_IA64_TYPE_HFA_DOUBLE:
+ {
+ double result;
+ ldf_fill (result, fpaddr);
+ *(double *) addr = result;
+ break;
+ }
+ case FFI_IA64_TYPE_HFA_LDOUBLE:
+ {
+ __float80 result;
+ ldf_fill (result, fpaddr);
+ *(__float80 *) addr = result;
+ break;
+ }
+ default:
+ abort ();
+ }
+}
+
+/* Is TYPE a struct containing floats, doubles, or extended doubles,
+ all of the same fp type? If so, return the element type. Return
+ FFI_TYPE_VOID if not. */
+
+static int
+hfa_element_type (ffi_type *type, int nested)
+{
+ int element = FFI_TYPE_VOID;
+
+ switch (type->type)
+ {
+ case FFI_TYPE_FLOAT:
+ /* We want to return VOID for raw floating-point types, but the
+ synthetic HFA type if we're nested within an aggregate. */
+ if (nested)
+ element = FFI_IA64_TYPE_HFA_FLOAT;
+ break;
+
+ case FFI_TYPE_DOUBLE:
+ /* Similarly. */
+ if (nested)
+ element = FFI_IA64_TYPE_HFA_DOUBLE;
+ break;
+
+ case FFI_TYPE_LONGDOUBLE:
+      /* Similarly, except that HFA is true for double extended,
+ but not quad precision. Both have sizeof == 16, so tell the
+ difference based on the precision. */
+ if (LDBL_MANT_DIG == 64 && nested)
+ element = FFI_IA64_TYPE_HFA_LDOUBLE;
+ break;
+
+ case FFI_TYPE_STRUCT:
+ {
+ ffi_type **ptr = &type->elements[0];
+
+ for (ptr = &type->elements[0]; *ptr ; ptr++)
+ {
+ int sub_element = hfa_element_type (*ptr, 1);
+ if (sub_element == FFI_TYPE_VOID)
+ return FFI_TYPE_VOID;
+
+ if (element == FFI_TYPE_VOID)
+ element = sub_element;
+ else if (element != sub_element)
+ return FFI_TYPE_VOID;
+ }
+ }
+ break;
+
+ default:
+ return FFI_TYPE_VOID;
+ }
+
+ return element;
+}
+
+
+/* Perform machine dependent cif processing. */
+
+ffi_status
+ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ int flags;
+
+ /* Adjust cif->bytes to include space for the bits of the ia64_args frame
+     that precede the integer register portion. The estimate that the
+ generic bits did for the argument space required is good enough for the
+ integer component. */
+ cif->bytes += offsetof(struct ia64_args, gp_regs[0]);
+ if (cif->bytes < sizeof(struct ia64_args))
+ cif->bytes = sizeof(struct ia64_args);
+
+ /* Set the return type flag. */
+ flags = cif->rtype->type;
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_LONGDOUBLE:
+ /* Leave FFI_TYPE_LONGDOUBLE as meaning double extended precision,
+ and encode quad precision as a two-word integer structure. */
+ if (LDBL_MANT_DIG != 64)
+ flags = FFI_IA64_TYPE_SMALL_STRUCT | (16 << 8);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ {
+ size_t size = cif->rtype->size;
+ int hfa_type = hfa_element_type (cif->rtype, 0);
+
+ if (hfa_type != FFI_TYPE_VOID)
+ {
+ size_t nelts = size / hfa_type_size (hfa_type);
+ if (nelts <= 8)
+ flags = hfa_type | (size << 8);
+ }
+ else
+ {
+ if (size <= 32)
+ flags = FFI_IA64_TYPE_SMALL_STRUCT | (size << 8);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ cif->flags = flags;
+
+ return FFI_OK;
+}
+
+extern int ffi_call_unix (struct ia64_args *, PTR64, void (*)(), UINT64);
+
+void
+ffi_call(ffi_cif *cif, void (*fn)(), void *rvalue, void **avalue)
+{
+ struct ia64_args *stack;
+ long i, avn, gpcount, fpcount;
+ ffi_type **p_arg;
+
+ FFI_ASSERT (cif->abi == FFI_UNIX);
+
+ /* If we have no spot for a return value, make one. */
+ if (rvalue == NULL && cif->rtype->type != FFI_TYPE_VOID)
+ rvalue = alloca (cif->rtype->size);
+
+ /* Allocate the stack frame. */
+ stack = alloca (cif->bytes);
+
+ gpcount = fpcount = 0;
+ avn = cif->nargs;
+ for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++)
+ {
+ switch ((*p_arg)->type)
+ {
+ case FFI_TYPE_SINT8:
+ stack->gp_regs[gpcount++] = *(SINT8 *)avalue[i];
+ break;
+ case FFI_TYPE_UINT8:
+ stack->gp_regs[gpcount++] = *(UINT8 *)avalue[i];
+ break;
+ case FFI_TYPE_SINT16:
+ stack->gp_regs[gpcount++] = *(SINT16 *)avalue[i];
+ break;
+ case FFI_TYPE_UINT16:
+ stack->gp_regs[gpcount++] = *(UINT16 *)avalue[i];
+ break;
+ case FFI_TYPE_SINT32:
+ stack->gp_regs[gpcount++] = *(SINT32 *)avalue[i];
+ break;
+ case FFI_TYPE_UINT32:
+ stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i];
+ break;
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i];
+ break;
+
+ case FFI_TYPE_POINTER:
+ stack->gp_regs[gpcount++] = (UINT64)(PTR64) *(void **)avalue[i];
+ break;
+
+ case FFI_TYPE_FLOAT:
+ if (gpcount < 8 && fpcount < 8)
+ stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]);
+ stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i];
+ break;
+
+ case FFI_TYPE_DOUBLE:
+ if (gpcount < 8 && fpcount < 8)
+ stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]);
+ stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i];
+ break;
+
+ case FFI_TYPE_LONGDOUBLE:
+ if (gpcount & 1)
+ gpcount++;
+ if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8)
+ stf_spill (&stack->fp_regs[fpcount++], *(__float80 *)avalue[i]);
+ memcpy (&stack->gp_regs[gpcount], avalue[i], 16);
+ gpcount += 2;
+ break;
+
+ case FFI_TYPE_STRUCT:
+ {
+ size_t size = (*p_arg)->size;
+ size_t align = (*p_arg)->alignment;
+ int hfa_type = hfa_element_type (*p_arg, 0);
+
+ FFI_ASSERT (align <= 16);
+ if (align == 16 && (gpcount & 1))
+ gpcount++;
+
+ if (hfa_type != FFI_TYPE_VOID)
+ {
+ size_t hfa_size = hfa_type_size (hfa_type);
+ size_t offset = 0;
+ size_t gp_offset = gpcount * 8;
+
+ while (fpcount < 8
+ && offset < size
+ && gp_offset < 8 * 8)
+ {
+ hfa_type_load (&stack->fp_regs[fpcount], hfa_type,
+ avalue[i] + offset);
+ offset += hfa_size;
+ gp_offset += hfa_size;
+ fpcount += 1;
+ }
+ }
+
+ memcpy (&stack->gp_regs[gpcount], avalue[i], size);
+ gpcount += (size + 7) / 8;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ ffi_call_unix (stack, rvalue, fn, cif->flags);
+}
+
+/* Closures represent a pair consisting of a function pointer, and
+ some user data. A closure is invoked by reinterpreting the closure
+ as a function pointer, and branching to it. Thus we can make an
+ interpreted function callable as a C function: We turn the
+ interpreter itself, together with a pointer specifying the
+ interpreted procedure, into a closure.
+
+   For IA64, function pointers are already pairs consisting of a code
+ pointer, and a gp pointer. The latter is needed to access global
+ variables. Here we set up such a pair as the first two words of
+ the closure (in the "trampoline" area), but we replace the gp
+ pointer with a pointer to the closure itself. We also add the real
+ gp pointer to the closure. This allows the function entry code to
+   both retrieve the user data, and to restore the correct gp pointer. */
+
+extern void ffi_closure_unix ();
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ /* The layout of a function descriptor. A C function pointer really
+ points to one of these. */
+ struct ia64_fd
+ {
+ UINT64 code_pointer;
+ UINT64 gp;
+ };
+
+ struct ffi_ia64_trampoline_struct
+ {
+ UINT64 code_pointer; /* Pointer to ffi_closure_unix. */
+ UINT64 fake_gp; /* Pointer to closure, installed as gp. */
+ UINT64 real_gp; /* Real gp value. */
+ };
+
+ struct ffi_ia64_trampoline_struct *tramp;
+ struct ia64_fd *fd;
+
+ FFI_ASSERT (cif->abi == FFI_UNIX);
+
+ tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp;
+ fd = (struct ia64_fd *)(void *)ffi_closure_unix;
+
+ tramp->code_pointer = fd->code_pointer;
+ tramp->real_gp = fd->gp;
+ tramp->fake_gp = (UINT64)(PTR64)codeloc;
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+
+UINT64
+ffi_closure_unix_inner (ffi_closure *closure, struct ia64_args *stack,
+ void *rvalue, void *r8)
+{
+ ffi_cif *cif;
+ void **avalue;
+ ffi_type **p_arg;
+ long i, avn, gpcount, fpcount;
+
+ cif = closure->cif;
+ avn = cif->nargs;
+ avalue = alloca (avn * sizeof (void *));
+
+  /* If the structure return value is passed in memory, get that location
+ from r8 so as to pass the value directly back to the caller. */
+ if (cif->flags == FFI_TYPE_STRUCT)
+ rvalue = r8;
+
+ gpcount = fpcount = 0;
+ for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++)
+ {
+ switch ((*p_arg)->type)
+ {
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT8:
+ avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 1);
+ break;
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT16:
+ avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 2);
+ break;
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 4);
+ break;
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ avalue[i] = &stack->gp_regs[gpcount++];
+ break;
+ case FFI_TYPE_POINTER:
+ avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], sizeof(void*));
+ break;
+
+ case FFI_TYPE_FLOAT:
+ if (gpcount < 8 && fpcount < 8)
+ {
+ fpreg *addr = &stack->fp_regs[fpcount++];
+ float result;
+ avalue[i] = addr;
+ ldf_fill (result, addr);
+ *(float *)addr = result;
+ }
+ else
+ avalue[i] = endian_adjust(&stack->gp_regs[gpcount], 4);
+ gpcount++;
+ break;
+
+ case FFI_TYPE_DOUBLE:
+ if (gpcount < 8 && fpcount < 8)
+ {
+ fpreg *addr = &stack->fp_regs[fpcount++];
+ double result;
+ avalue[i] = addr;
+ ldf_fill (result, addr);
+ *(double *)addr = result;
+ }
+ else
+ avalue[i] = &stack->gp_regs[gpcount];
+ gpcount++;
+ break;
+
+ case FFI_TYPE_LONGDOUBLE:
+ if (gpcount & 1)
+ gpcount++;
+ if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8)
+ {
+ fpreg *addr = &stack->fp_regs[fpcount++];
+ __float80 result;
+ avalue[i] = addr;
+ ldf_fill (result, addr);
+ *(__float80 *)addr = result;
+ }
+ else
+ avalue[i] = &stack->gp_regs[gpcount];
+ gpcount += 2;
+ break;
+
+ case FFI_TYPE_STRUCT:
+ {
+ size_t size = (*p_arg)->size;
+ size_t align = (*p_arg)->alignment;
+ int hfa_type = hfa_element_type (*p_arg, 0);
+
+ FFI_ASSERT (align <= 16);
+ if (align == 16 && (gpcount & 1))
+ gpcount++;
+
+ if (hfa_type != FFI_TYPE_VOID)
+ {
+ size_t hfa_size = hfa_type_size (hfa_type);
+ size_t offset = 0;
+ size_t gp_offset = gpcount * 8;
+ void *addr = alloca (size);
+
+ avalue[i] = addr;
+
+ while (fpcount < 8
+ && offset < size
+ && gp_offset < 8 * 8)
+ {
+ hfa_type_store (hfa_type, addr + offset,
+ &stack->fp_regs[fpcount]);
+ offset += hfa_size;
+ gp_offset += hfa_size;
+ fpcount += 1;
+ }
+
+ if (offset < size)
+ memcpy (addr + offset, (char *)stack->gp_regs + gp_offset,
+ size - offset);
+ }
+ else
+ avalue[i] = &stack->gp_regs[gpcount];
+
+ gpcount += (size + 7) / 8;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ closure->fun (cif, rvalue, avalue, closure->user_data);
+
+ return cif->flags;
+}
diff --git a/libffi-3.0.4/src/ia64/ffitarget.h b/libffi-3.0.4/src/ia64/ffitarget.h
new file mode 100644
index 0000000..d85c049
--- /dev/null
+++ b/libffi-3.0.4/src/ia64/ffitarget.h
@@ -0,0 +1,50 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ Target configuration macros for IA-64.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_ASM
+typedef unsigned long long ffi_arg;
+typedef signed long long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_UNIX, /* Linux and all Unix variants use the same conventions */
+ FFI_DEFAULT_ABI = FFI_UNIX,
+ FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 24 /* Really the following struct, which */
+ /* can be interpreted as a C function */
+ /* descriptor: */
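
The 24 bytes reserved here are exactly the three 8-byte words that ffi_prep_closure_loc (in ffi.c of this same patch) installs in the trampoline area:

    struct ffi_ia64_trampoline_struct
    {
      UINT64 code_pointer;   /* entry point of ffi_closure_unix          */
      UINT64 fake_gp;        /* pointer to the closure, installed as gp  */
      UINT64 real_gp;        /* real gp value, restored on entry         */
    };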
+
+#endif
+
diff --git a/libffi-3.0.4/src/ia64/ia64_flags.h b/libffi-3.0.4/src/ia64/ia64_flags.h
new file mode 100644
index 0000000..9d652ce
--- /dev/null
+++ b/libffi-3.0.4/src/ia64/ia64_flags.h
@@ -0,0 +1,40 @@
+/* -----------------------------------------------------------------------
+ ia64_flags.h - Copyright (c) 2000 Hewlett Packard Company
+
+ IA64/unix Foreign Function Interface
+
+ Original author: Hans Boehm, HP Labs
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+/* "Type" codes used between assembly and C. When used as a part of
+   a cif->flags value, the low byte will be these extra type codes,
+ and bits 8-31 will be the actual size of the type. */
+
+/* Small structures containing N words in integer registers. */
+#define FFI_IA64_TYPE_SMALL_STRUCT (FFI_TYPE_LAST + 1)
+
+/* Homogeneous Floating Point Aggregates (HFAs) which are returned
+ in FP registers. */
+#define FFI_IA64_TYPE_HFA_FLOAT (FFI_TYPE_LAST + 2)
+#define FFI_IA64_TYPE_HFA_DOUBLE (FFI_TYPE_LAST + 3)
+#define FFI_IA64_TYPE_HFA_LDOUBLE (FFI_TYPE_LAST + 4)
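
Both dispatch sequences in unix.S (in ffi_call_unix and ffi_closure_unix) split such a value the same way; in C terms, roughly:

    /* type_code = flags & 0xff;   -- the zxt1 instruction in unix.S   */
    /* size      = flags >> 8;     -- the "shr ..., 8" that follows it */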
diff --git a/libffi-3.0.4/src/ia64/unix.S b/libffi-3.0.4/src/ia64/unix.S
new file mode 100644
index 0000000..2d10296
--- /dev/null
+++ b/libffi-3.0.4/src/ia64/unix.S
@@ -0,0 +1,556 @@
+/* -----------------------------------------------------------------------
+ unix.S - Copyright (c) 1998 Red Hat, Inc.
+ Copyright (c) 2000 Hewlett Packard Company
+
+ IA64/unix Foreign Function Interface
+
+ Primary author: Hans Boehm, HP Labs
+
+ Loosely modeled on Cygnus code for other platforms.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include "ia64_flags.h"
+
+ .pred.safe_across_calls p1-p5,p16-p63
+.text
+
+/* int ffi_call_unix (struct ia64_args *stack, PTR64 rvalue,
+ void (*fn)(), int flags);
+ */
+
+ .align 16
+ .global ffi_call_unix
+ .proc ffi_call_unix
+ffi_call_unix:
+ .prologue
+ /* Bit o trickiness. We actually share a stack frame with ffi_call.
+ Rely on the fact that ffi_call uses a vframe and don't bother
+ tracking one here at all. */
+ .fframe 0
+ .save ar.pfs, r36 // loc0
+ alloc loc0 = ar.pfs, 4, 3, 8, 0
+ .save rp, loc1
+ mov loc1 = b0
+ .body
+ add r16 = 16, in0
+ mov loc2 = gp
+ mov r8 = in1
+ ;;
+
+ /* Load up all of the argument registers. */
+ ldf.fill f8 = [in0], 32
+ ldf.fill f9 = [r16], 32
+ ;;
+ ldf.fill f10 = [in0], 32
+ ldf.fill f11 = [r16], 32
+ ;;
+ ldf.fill f12 = [in0], 32
+ ldf.fill f13 = [r16], 32
+ ;;
+ ldf.fill f14 = [in0], 32
+ ldf.fill f15 = [r16], 24
+ ;;
+ ld8 out0 = [in0], 16
+ ld8 out1 = [r16], 16
+ ;;
+ ld8 out2 = [in0], 16
+ ld8 out3 = [r16], 16
+ ;;
+ ld8 out4 = [in0], 16
+ ld8 out5 = [r16], 16
+ ;;
+ ld8 out6 = [in0]
+ ld8 out7 = [r16]
+ ;;
+
+ /* Deallocate the register save area from the stack frame. */
+ mov sp = in0
+
+ /* Call the target function. */
+ ld8 r16 = [in2], 8
+ ;;
+ ld8 gp = [in2]
+ mov b6 = r16
+ br.call.sptk.many b0 = b6
+ ;;
+
+ /* Dispatch to handle return value. */
+ mov gp = loc2
+ zxt1 r16 = in3
+ ;;
+ mov ar.pfs = loc0
+ addl r18 = @ltoffx(.Lst_table), gp
+ ;;
+ ld8.mov r18 = [r18], .Lst_table
+ mov b0 = loc1
+ ;;
+ shladd r18 = r16, 3, r18
+ ;;
+ ld8 r17 = [r18]
+ shr in3 = in3, 8
+ ;;
+ add r17 = r17, r18
+ ;;
+ mov b6 = r17
+ br b6
+ ;;
+
+.Lst_void:
+ br.ret.sptk.many b0
+ ;;
+.Lst_uint8:
+ zxt1 r8 = r8
+ ;;
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_sint8:
+ sxt1 r8 = r8
+ ;;
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_uint16:
+ zxt2 r8 = r8
+ ;;
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_sint16:
+ sxt2 r8 = r8
+ ;;
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_uint32:
+ zxt4 r8 = r8
+ ;;
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_sint32:
+ sxt4 r8 = r8
+ ;;
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_int64:
+ st8 [in1] = r8
+ br.ret.sptk.many b0
+ ;;
+.Lst_float:
+ stfs [in1] = f8
+ br.ret.sptk.many b0
+ ;;
+.Lst_double:
+ stfd [in1] = f8
+ br.ret.sptk.many b0
+ ;;
+.Lst_ldouble:
+ stfe [in1] = f8
+ br.ret.sptk.many b0
+ ;;
+
+.Lst_small_struct:
+ add sp = -16, sp
+ cmp.lt p6, p0 = 8, in3
+ cmp.lt p7, p0 = 16, in3
+ cmp.lt p8, p0 = 24, in3
+ ;;
+ add r16 = 8, sp
+ add r17 = 16, sp
+ add r18 = 24, sp
+ ;;
+ st8 [sp] = r8
+(p6) st8 [r16] = r9
+ mov out0 = in1
+(p7) st8 [r17] = r10
+(p8) st8 [r18] = r11
+ mov out1 = sp
+ mov out2 = in3
+ br.call.sptk.many b0 = memcpy#
+ ;;
+ mov ar.pfs = loc0
+ mov b0 = loc1
+ mov gp = loc2
+ br.ret.sptk.many b0
+
+.Lst_hfa_float:
+ add r16 = 4, in1
+ cmp.lt p6, p0 = 4, in3
+ ;;
+ stfs [in1] = f8, 8
+(p6) stfs [r16] = f9, 8
+ cmp.lt p7, p0 = 8, in3
+ cmp.lt p8, p0 = 12, in3
+ ;;
+(p7) stfs [in1] = f10, 8
+(p8) stfs [r16] = f11, 8
+ cmp.lt p9, p0 = 16, in3
+ cmp.lt p10, p0 = 20, in3
+ ;;
+(p9) stfs [in1] = f12, 8
+(p10) stfs [r16] = f13, 8
+ cmp.lt p6, p0 = 24, in3
+ cmp.lt p7, p0 = 28, in3
+ ;;
+(p6) stfs [in1] = f14
+(p7) stfs [r16] = f15
+ br.ret.sptk.many b0
+ ;;
+
+.Lst_hfa_double:
+ add r16 = 8, in1
+ cmp.lt p6, p0 = 8, in3
+ ;;
+ stfd [in1] = f8, 16
+(p6) stfd [r16] = f9, 16
+ cmp.lt p7, p0 = 16, in3
+ cmp.lt p8, p0 = 24, in3
+ ;;
+(p7) stfd [in1] = f10, 16
+(p8) stfd [r16] = f11, 16
+ cmp.lt p9, p0 = 32, in3
+ cmp.lt p10, p0 = 40, in3
+ ;;
+(p9) stfd [in1] = f12, 16
+(p10) stfd [r16] = f13, 16
+ cmp.lt p6, p0 = 48, in3
+ cmp.lt p7, p0 = 56, in3
+ ;;
+(p6) stfd [in1] = f14
+(p7) stfd [r16] = f15
+ br.ret.sptk.many b0
+ ;;
+
+.Lst_hfa_ldouble:
+ add r16 = 16, in1
+ cmp.lt p6, p0 = 16, in3
+ ;;
+ stfe [in1] = f8, 32
+(p6) stfe [r16] = f9, 32
+ cmp.lt p7, p0 = 32, in3
+ cmp.lt p8, p0 = 48, in3
+ ;;
+(p7) stfe [in1] = f10, 32
+(p8) stfe [r16] = f11, 32
+ cmp.lt p9, p0 = 64, in3
+ cmp.lt p10, p0 = 80, in3
+ ;;
+(p9) stfe [in1] = f12, 32
+(p10) stfe [r16] = f13, 32
+ cmp.lt p6, p0 = 96, in3
+ cmp.lt p7, p0 = 112, in3
+ ;;
+(p6) stfe [in1] = f14
+(p7) stfe [r16] = f15
+ br.ret.sptk.many b0
+ ;;
+
+ .endp ffi_call_unix
+
+ .align 16
+ .global ffi_closure_unix
+ .proc ffi_closure_unix
+
+#define FRAME_SIZE (8*16 + 8*8 + 8*16)
+
+ffi_closure_unix:
+ .prologue
+ .save ar.pfs, r40 // loc0
+ alloc loc0 = ar.pfs, 8, 4, 4, 0
+ .fframe FRAME_SIZE
+ add r12 = -FRAME_SIZE, r12
+ .save rp, loc1
+ mov loc1 = b0
+ .save ar.unat, loc2
+ mov loc2 = ar.unat
+ .body
+
+ /* Retrieve closure pointer and real gp. */
+#ifdef _ILP32
+ addp4 out0 = 0, gp
+ addp4 gp = 16, gp
+#else
+ mov out0 = gp
+ add gp = 16, gp
+#endif
+ ;;
+ ld8 gp = [gp]
+
+ /* Spill all of the possible argument registers. */
+ add r16 = 16 + 8*16, sp
+ add r17 = 16 + 8*16 + 16, sp
+ ;;
+ stf.spill [r16] = f8, 32
+ stf.spill [r17] = f9, 32
+ mov loc3 = gp
+ ;;
+ stf.spill [r16] = f10, 32
+ stf.spill [r17] = f11, 32
+ ;;
+ stf.spill [r16] = f12, 32
+ stf.spill [r17] = f13, 32
+ ;;
+ stf.spill [r16] = f14, 32
+ stf.spill [r17] = f15, 24
+ ;;
+ .mem.offset 0, 0
+ st8.spill [r16] = in0, 16
+ .mem.offset 8, 0
+ st8.spill [r17] = in1, 16
+ add out1 = 16 + 8*16, sp
+ ;;
+ .mem.offset 0, 0
+ st8.spill [r16] = in2, 16
+ .mem.offset 8, 0
+ st8.spill [r17] = in3, 16
+ add out2 = 16, sp
+ ;;
+ .mem.offset 0, 0
+ st8.spill [r16] = in4, 16
+ .mem.offset 8, 0
+ st8.spill [r17] = in5, 16
+ mov out3 = r8
+ ;;
+ .mem.offset 0, 0
+ st8.spill [r16] = in6
+ .mem.offset 8, 0
+ st8.spill [r17] = in7
+
+ /* Invoke ffi_closure_unix_inner for the hard work. */
+ br.call.sptk.many b0 = ffi_closure_unix_inner
+ ;;
+
+ /* Dispatch to handle return value. */
+ mov gp = loc3
+ zxt1 r16 = r8
+ ;;
+ addl r18 = @ltoffx(.Lld_table), gp
+ mov ar.pfs = loc0
+ ;;
+ ld8.mov r18 = [r18], .Lld_table
+ mov b0 = loc1
+ ;;
+ shladd r18 = r16, 3, r18
+ mov ar.unat = loc2
+ ;;
+ ld8 r17 = [r18]
+ shr r8 = r8, 8
+ ;;
+ add r17 = r17, r18
+ add r16 = 16, sp
+ ;;
+ mov b6 = r17
+ br b6
+ ;;
+ .label_state 1
+
+.Lld_void:
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+.Lld_int:
+ .body
+ .copy_state 1
+ ld8 r8 = [r16]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+.Lld_float:
+ .body
+ .copy_state 1
+ ldfs f8 = [r16]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+.Lld_double:
+ .body
+ .copy_state 1
+ ldfd f8 = [r16]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+.Lld_ldouble:
+ .body
+ .copy_state 1
+ ldfe f8 = [r16]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+
+.Lld_small_struct:
+ .body
+ .copy_state 1
+ add r17 = 8, r16
+ cmp.lt p6, p0 = 8, r8
+ cmp.lt p7, p0 = 16, r8
+ cmp.lt p8, p0 = 24, r8
+ ;;
+ ld8 r8 = [r16], 16
+(p6) ld8 r9 = [r17], 16
+ ;;
+(p7) ld8 r10 = [r16]
+(p8) ld8 r11 = [r17]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+
+.Lld_hfa_float:
+ .body
+ .copy_state 1
+ add r17 = 4, r16
+ cmp.lt p6, p0 = 4, r8
+ ;;
+ ldfs f8 = [r16], 8
+(p6) ldfs f9 = [r17], 8
+ cmp.lt p7, p0 = 8, r8
+ cmp.lt p8, p0 = 12, r8
+ ;;
+(p7) ldfs f10 = [r16], 8
+(p8) ldfs f11 = [r17], 8
+ cmp.lt p9, p0 = 16, r8
+ cmp.lt p10, p0 = 20, r8
+ ;;
+(p9) ldfs f12 = [r16], 8
+(p10) ldfs f13 = [r17], 8
+ cmp.lt p6, p0 = 24, r8
+ cmp.lt p7, p0 = 28, r8
+ ;;
+(p6) ldfs f14 = [r16]
+(p7) ldfs f15 = [r17]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+
+.Lld_hfa_double:
+ .body
+ .copy_state 1
+ add r17 = 8, r16
+ cmp.lt p6, p0 = 8, r8
+ ;;
+ ldfd f8 = [r16], 16
+(p6) ldfd f9 = [r17], 16
+ cmp.lt p7, p0 = 16, r8
+ cmp.lt p8, p0 = 24, r8
+ ;;
+(p7) ldfd f10 = [r16], 16
+(p8) ldfd f11 = [r17], 16
+ cmp.lt p9, p0 = 32, r8
+ cmp.lt p10, p0 = 40, r8
+ ;;
+(p9) ldfd f12 = [r16], 16
+(p10) ldfd f13 = [r17], 16
+ cmp.lt p6, p0 = 48, r8
+ cmp.lt p7, p0 = 56, r8
+ ;;
+(p6) ldfd f14 = [r16]
+(p7) ldfd f15 = [r17]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+
+.Lld_hfa_ldouble:
+ .body
+ .copy_state 1
+ add r17 = 16, r16
+ cmp.lt p6, p0 = 16, r8
+ ;;
+ ldfe f8 = [r16], 32
+(p6) ldfe f9 = [r17], 32
+ cmp.lt p7, p0 = 32, r8
+ cmp.lt p8, p0 = 48, r8
+ ;;
+(p7) ldfe f10 = [r16], 32
+(p8) ldfe f11 = [r17], 32
+ cmp.lt p9, p0 = 64, r8
+ cmp.lt p10, p0 = 80, r8
+ ;;
+(p9) ldfe f12 = [r16], 32
+(p10) ldfe f13 = [r17], 32
+ cmp.lt p6, p0 = 96, r8
+ cmp.lt p7, p0 = 112, r8
+ ;;
+(p6) ldfe f14 = [r16]
+(p7) ldfe f15 = [r17]
+ .restore sp
+ add sp = FRAME_SIZE, sp
+ br.ret.sptk.many b0
+ ;;
+
+ .endp ffi_closure_unix
+
+ .section .rodata
+ .align 8
+.Lst_table:
+ data8 @pcrel(.Lst_void) // FFI_TYPE_VOID
+ data8 @pcrel(.Lst_sint32) // FFI_TYPE_INT
+ data8 @pcrel(.Lst_float) // FFI_TYPE_FLOAT
+ data8 @pcrel(.Lst_double) // FFI_TYPE_DOUBLE
+ data8 @pcrel(.Lst_ldouble) // FFI_TYPE_LONGDOUBLE
+ data8 @pcrel(.Lst_uint8) // FFI_TYPE_UINT8
+ data8 @pcrel(.Lst_sint8) // FFI_TYPE_SINT8
+ data8 @pcrel(.Lst_uint16) // FFI_TYPE_UINT16
+ data8 @pcrel(.Lst_sint16) // FFI_TYPE_SINT16
+ data8 @pcrel(.Lst_uint32) // FFI_TYPE_UINT32
+ data8 @pcrel(.Lst_sint32) // FFI_TYPE_SINT32
+ data8 @pcrel(.Lst_int64) // FFI_TYPE_UINT64
+ data8 @pcrel(.Lst_int64) // FFI_TYPE_SINT64
+ data8 @pcrel(.Lst_void) // FFI_TYPE_STRUCT
+ data8 @pcrel(.Lst_int64) // FFI_TYPE_POINTER
+ data8 @pcrel(.Lst_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT
+ data8 @pcrel(.Lst_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT
+ data8 @pcrel(.Lst_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE
+ data8 @pcrel(.Lst_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE
+
+.Lld_table:
+ data8 @pcrel(.Lld_void) // FFI_TYPE_VOID
+ data8 @pcrel(.Lld_int) // FFI_TYPE_INT
+ data8 @pcrel(.Lld_float) // FFI_TYPE_FLOAT
+ data8 @pcrel(.Lld_double) // FFI_TYPE_DOUBLE
+ data8 @pcrel(.Lld_ldouble) // FFI_TYPE_LONGDOUBLE
+ data8 @pcrel(.Lld_int) // FFI_TYPE_UINT8
+ data8 @pcrel(.Lld_int) // FFI_TYPE_SINT8
+ data8 @pcrel(.Lld_int) // FFI_TYPE_UINT16
+ data8 @pcrel(.Lld_int) // FFI_TYPE_SINT16
+ data8 @pcrel(.Lld_int) // FFI_TYPE_UINT32
+ data8 @pcrel(.Lld_int) // FFI_TYPE_SINT32
+ data8 @pcrel(.Lld_int) // FFI_TYPE_UINT64
+ data8 @pcrel(.Lld_int) // FFI_TYPE_SINT64
+ data8 @pcrel(.Lld_void) // FFI_TYPE_STRUCT
+ data8 @pcrel(.Lld_int) // FFI_TYPE_POINTER
+ data8 @pcrel(.Lld_small_struct) // FFI_IA64_TYPE_SMALL_STRUCT
+ data8 @pcrel(.Lld_hfa_float) // FFI_IA64_TYPE_HFA_FLOAT
+ data8 @pcrel(.Lld_hfa_double) // FFI_IA64_TYPE_HFA_DOUBLE
+ data8 @pcrel(.Lld_hfa_ldouble) // FFI_IA64_TYPE_HFA_LDOUBLE