Commit 2edfbab4 authored by Joachim Breitner

ghc-jessie: Colin's upload of 7.6.3-9

parent 4abc2a9a
ghc (7.6.3-9) unstable; urgency=medium
* Team upload.
* Ensure that all copies of config.guess and config.sub in the tree are up
to date at build time.
* Add arm64 support.
-- Colin Watson <cjwatson@debian.org> Sat, 05 Apr 2014 02:00:21 +0100
ghc (7.6.3-8) unstable; urgency=medium
* Apply a4b1a435 from upstream, to fix building on 64-bit big endian platforms
@@ -12,6 +12,7 @@ Build-Depends:
ghc,
grep-dctrl,
dh-autoreconf,
+ autotools-dev,
llvm [armel armhf],
libffi-dev,
pkg-config,
Description: Add arm64 support
Author: Karel Gardas <karel.gardas@centrum.cz>
Author: Colin Watson <cjwatson@ubuntu.com>
Bug: https://ghc.haskell.org/trac/ghc/ticket/7942
Last-Update: 2014-04-04
Index: b/aclocal.m4
===================================================================
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -173,7 +173,7 @@
GET_ARM_ISA()
test -z "[$]2" || eval "[$]2=\"ArchARM {armISA = \$ARM_ISA, armISAExt = \$ARM_ISA_EXT, armABI = \$ARM_ABI}\""
;;
- alpha|mips|mipseb|mipsel|hppa|hppa1_1|ia64|m68k|rs6000|s390|s390x|sparc64|vax)
+ aarch64|alpha|mips|mipseb|mipsel|hppa|hppa1_1|ia64|m68k|rs6000|s390|s390x|sparc64|vax)
test -z "[$]2" || eval "[$]2=ArchUnknown"
;;
*)
@@ -1835,6 +1835,9 @@
# converts cpu from gnu to ghc naming, and assigns the result to $target_var
AC_DEFUN([GHC_CONVERT_CPU],[
case "$1" in
+ aarch64*)
+ $2="aarch64"
+ ;;
alpha*)
$2="alpha"
;;
Index: b/includes/stg/MachRegs.h
===================================================================
--- a/includes/stg/MachRegs.h
+++ b/includes/stg/MachRegs.h
@@ -43,6 +43,7 @@
#define powerpc_REGS (powerpc_TARGET_ARCH || powerpc64_TARGET_ARCH || rs6000_TARGET_ARCH)
#define sparc_REGS sparc_TARGET_ARCH
#define arm_REGS arm_TARGET_ARCH
+#define aarch64_REGS aarch64_TARGET_ARCH
#define darwin_REGS darwin_TARGET_OS
#else
#define i386_REGS i386_HOST_ARCH
@@ -50,6 +51,7 @@
#define powerpc_REGS (powerpc_HOST_ARCH || powerpc64_HOST_ARCH || rs6000_HOST_ARCH)
#define sparc_REGS sparc_HOST_ARCH
#define arm_REGS arm_HOST_ARCH
+#define aarch64_REGS aarch64_HOST_ARCH
#define darwin_REGS darwin_HOST_OS
#endif
@@ -461,6 +463,63 @@
#endif /* arm */
+/* -----------------------------------------------------------------------------
+ The ARMv8/AArch64 ABI register mapping
+
+ The AArch64 provides 31 64-bit general purpose registers
+ and 32 128-bit SIMD/floating point registers.
+
+ General purpose registers (see Chapter 5.1.1 in ARM IHI 0055B)
+
+ Register | Special | Role in the procedure call standard
+ ---------+---------+------------------------------------
+ SP | | The Stack Pointer
+ r30 | LR | The Link Register
+ r29 | FP | The Frame Pointer
+ r19-r28 | | Callee-saved registers
+ r18 | | The Platform Register, if needed;
+ | | or temporary register
+ r17 | IP1 | The second intra-procedure-call temporary register
+ r16 | IP0 | The first intra-procedure-call scratch register
+ r9-r15 | | Temporary registers
+ r8 | | Indirect result location register
+ r0-r7 | | Parameter/result registers
+
+
+ FPU/SIMD registers
+
+ s/d/q/v0-v7 Argument / result / scratch registers
+ s/d/q/v8-v15 callee-saved registers (must be preserved across subroutine calls,
+ but only the bottom 64 bits need to be preserved)
+ s/d/q/v16-v31 temporary registers
+
+ ----------------------------------------------------------------------------- */
+
+#if aarch64_REGS
+
+#define REG(x) __asm__(#x)
+
+#define REG_Base r19
+#define REG_Sp r20
+#define REG_Hp r21
+#define REG_R1 r22
+#define REG_R2 r23
+#define REG_R3 r24
+#define REG_R4 r25
+#define REG_R5 r26
+#define REG_R6 r27
+#define REG_SpLim r28
+
+#define REG_F1 s8
+#define REG_F2 s9
+#define REG_F3 s10
+#define REG_F4 s11
+
+#define REG_D1 d12
+#define REG_D2 d13
+
+#endif /* aarch64 */
+
#endif /* NO_REGS */
/* -----------------------------------------------------------------------------
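For illustration (not part of the patch): the REG(x) macro above stringises a register name into GCC's asm-register annotation, which is how a mapping of this shape is typically consumed as a global register variable elsewhere in the runtime headers. The identifier, type, and exact register spelling below are illustrative assumptions, not taken from this commit.

/* Sketch only: pin a global to the register chosen for REG_Sp above.
   Whether "r20" or "x20" is the accepted spelling depends on the toolchain. */
#define REG(x) __asm__(#x)
register unsigned long *Sp REG(r20);   /* hypothetical STG stack pointer */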
Index: b/libffi/aarch64.patch
===================================================================
--- /dev/null
+++ b/libffi/aarch64.patch
@@ -0,0 +1,1511 @@
+2012-10-30 James Greenhalgh <james.greenhalgh at arm.com>
+ Marcus Shawcroft <marcus.shawcroft at arm.com>
+
+ * src/aarch64/ffi.c: New.
+ * src/aarch64/ffitarget.h: Likewise.
+ * src/aarch64/sysv.S: Likewise.
+ * Makefile.am: Support aarch64.
+ * configure.ac: Support aarch64.
+
+Index: b/Makefile.am
+===================================================================
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -5,6 +5,7 @@
+ SUBDIRS = include testsuite man
+
+ EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
++ src/aarch64/ffi.c src/aarch64/ffitarget.h src/aarch64/sysv.S \
+ src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \
+ src/arm/ffi.c src/arm/sysv.S src/arm/ffitarget.h \
+ src/avr32/ffi.c src/avr32/sysv.S src/avr32/ffitarget.h \
+@@ -147,6 +148,9 @@
+ if POWERPC_FREEBSD
+ nodist_libffi_la_SOURCES += src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S
+ endif
++if AARCH64
++nodist_libffi_la_SOURCES += src/aarch64/sysv.S src/aarch64/ffi.c
++endif
+ if ARM
+ nodist_libffi_la_SOURCES += src/arm/sysv.S src/arm/ffi.c
+ if FFI_EXEC_TRAMPOLINE_TABLE
+Index: b/configure.ac
+===================================================================
+--- a/configure.ac
++++ b/configure.ac
+@@ -53,6 +53,10 @@
+
+ TARGETDIR="unknown"
+ case "$host" in
++ aarch64*-*-*)
++ TARGET=AARCH64; TARGETDIR=aarch64
++ ;;
++
+ alpha*-*-*)
+ TARGET=ALPHA; TARGETDIR=alpha;
+ # Support 128-bit long double, changeable via command-line switch.
+@@ -228,6 +232,7 @@
+ AM_CONDITIONAL(POWERPC_AIX, test x$TARGET = xPOWERPC_AIX)
+ AM_CONDITIONAL(POWERPC_DARWIN, test x$TARGET = xPOWERPC_DARWIN)
+ AM_CONDITIONAL(POWERPC_FREEBSD, test x$TARGET = xPOWERPC_FREEBSD)
++AM_CONDITIONAL(AARCH64, test x$TARGET = xAARCH64)
+ AM_CONDITIONAL(ARM, test x$TARGET = xARM)
+ AM_CONDITIONAL(AVR32, test x$TARGET = xAVR32)
+ AM_CONDITIONAL(LIBFFI_CRIS, test x$TARGET = xLIBFFI_CRIS)
+Index: b/src/aarch64/ffi.c
+===================================================================
+--- /dev/null
++++ b/src/aarch64/ffi.c
+@@ -0,0 +1,1076 @@
++/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
++
++Permission is hereby granted, free of charge, to any person obtaining
++a copy of this software and associated documentation files (the
++``Software''), to deal in the Software without restriction, including
++without limitation the rights to use, copy, modify, merge, publish,
++distribute, sublicense, and/or sell copies of the Software, and to
++permit persons to whom the Software is furnished to do so, subject to
++the following conditions:
++
++The above copyright notice and this permission notice shall be
++included in all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
++EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
++
++#include <stdio.h>
++
++#include <ffi.h>
++#include <ffi_common.h>
++
++#include <stdlib.h>
++
++/* Stack alignment requirement in bytes */
++#define AARCH64_STACK_ALIGN 16
++
++#define N_X_ARG_REG 8
++#define N_V_ARG_REG 8
++
++#define AARCH64_FFI_WITH_V (1 << AARCH64_FFI_WITH_V_BIT)
++
++union _d
++{
++ UINT64 d;
++ UINT32 s[2];
++};
++
++struct call_context
++{
++ UINT64 x [AARCH64_N_XREG];
++ struct
++ {
++ union _d d[2];
++ } v [AARCH64_N_VREG];
++};
++
++static void *
++get_x_addr (struct call_context *context, unsigned n)
++{
++ return &context->x[n];
++}
++
++static void *
++get_s_addr (struct call_context *context, unsigned n)
++{
++#if defined __AARCH64EB__
++ return &context->v[n].d[1].s[1];
++#else
++ return &context->v[n].d[0].s[0];
++#endif
++}
++
++static void *
++get_d_addr (struct call_context *context, unsigned n)
++{
++#if defined __AARCH64EB__
++ return &context->v[n].d[1];
++#else
++ return &context->v[n].d[0];
++#endif
++}
++
++static void *
++get_v_addr (struct call_context *context, unsigned n)
++{
++ return &context->v[n];
++}
++
++/* Return the memory location at which a basic type would reside
++ were it to have been stored in register n. */
++
++static void *
++get_basic_type_addr (unsigned short type, struct call_context *context,
++ unsigned n)
++{
++ switch (type)
++ {
++ case FFI_TYPE_FLOAT:
++ return get_s_addr (context, n);
++ case FFI_TYPE_DOUBLE:
++ return get_d_addr (context, n);
++ case FFI_TYPE_LONGDOUBLE:
++ return get_v_addr (context, n);
++ case FFI_TYPE_UINT8:
++ case FFI_TYPE_SINT8:
++ case FFI_TYPE_UINT16:
++ case FFI_TYPE_SINT16:
++ case FFI_TYPE_UINT32:
++ case FFI_TYPE_SINT32:
++ case FFI_TYPE_INT:
++ case FFI_TYPE_POINTER:
++ case FFI_TYPE_UINT64:
++ case FFI_TYPE_SINT64:
++ return get_x_addr (context, n);
++ default:
++ FFI_ASSERT (0);
++ return NULL;
++ }
++}
++
++/* Return the alignment width for each of the basic types. */
++
++static size_t
++get_basic_type_alignment (unsigned short type)
++{
++ switch (type)
++ {
++ case FFI_TYPE_FLOAT:
++ case FFI_TYPE_DOUBLE:
++ return sizeof (UINT64);
++ case FFI_TYPE_LONGDOUBLE:
++ return sizeof (long double);
++ case FFI_TYPE_UINT8:
++ case FFI_TYPE_SINT8:
++ case FFI_TYPE_UINT16:
++ case FFI_TYPE_SINT16:
++ case FFI_TYPE_UINT32:
++ case FFI_TYPE_INT:
++ case FFI_TYPE_SINT32:
++ case FFI_TYPE_POINTER:
++ case FFI_TYPE_UINT64:
++ case FFI_TYPE_SINT64:
++ return sizeof (UINT64);
++
++ default:
++ FFI_ASSERT (0);
++ return 0;
++ }
++}
++
++/* Return the size in bytes for each of the basic types. */
++
++static size_t
++get_basic_type_size (unsigned short type)
++{
++ switch (type)
++ {
++ case FFI_TYPE_FLOAT:
++ return sizeof (UINT32);
++ case FFI_TYPE_DOUBLE:
++ return sizeof (UINT64);
++ case FFI_TYPE_LONGDOUBLE:
++ return sizeof (long double);
++ case FFI_TYPE_UINT8:
++ return sizeof (UINT8);
++ case FFI_TYPE_SINT8:
++ return sizeof (SINT8);
++ case FFI_TYPE_UINT16:
++ return sizeof (UINT16);
++ case FFI_TYPE_SINT16:
++ return sizeof (SINT16);
++ case FFI_TYPE_UINT32:
++ return sizeof (UINT32);
++ case FFI_TYPE_INT:
++ case FFI_TYPE_SINT32:
++ return sizeof (SINT32);
++ case FFI_TYPE_POINTER:
++ case FFI_TYPE_UINT64:
++ return sizeof (UINT64);
++ case FFI_TYPE_SINT64:
++ return sizeof (SINT64);
++
++ default:
++ FFI_ASSERT (0);
++ return 0;
++ }
++}
++
++extern void
++ffi_call_SYSV (unsigned (*)(struct call_context *context, unsigned char *,
++ extended_cif *),
++ struct call_context *context,
++ extended_cif *,
++ unsigned,
++ void (*fn)(void));
++
++extern void
++ffi_closure_SYSV (ffi_closure *);
++
++/* Test for an FFI floating point representation. */
++
++static unsigned
++is_floating_type (unsigned short type)
++{
++ return (type == FFI_TYPE_FLOAT || type == FFI_TYPE_DOUBLE
++ || type == FFI_TYPE_LONGDOUBLE);
++}
++
++/* Test for a homogeneous structure. */
++
++static unsigned short
++get_homogeneous_type (ffi_type *ty)
++{
++ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
++ {
++ unsigned i;
++ unsigned short candidate_type
++ = get_homogeneous_type (ty->elements[0]);
++ for (i =1; ty->elements[i]; i++)
++ {
++ unsigned short iteration_type = 0;
++ /* If we have a nested struct, we must find its homogeneous type.
++ If that fits with our candidate type, we are still
++ homogeneous. */
++ if (ty->elements[i]->type == FFI_TYPE_STRUCT
++ && ty->elements[i]->elements)
++ {
++ iteration_type = get_homogeneous_type (ty->elements[i]);
++ }
++ else
++ {
++ iteration_type = ty->elements[i]->type;
++ }
++
++ /* If we are not homogeneous, return FFI_TYPE_STRUCT. */
++ if (candidate_type != iteration_type)
++ return FFI_TYPE_STRUCT;
++ }
++ return candidate_type;
++ }
++
++ /* Base case, we have no more levels of nesting, so we
++ are a basic type, and so, trivially homogeneous in that type. */
++ return ty->type;
++}
++
++/* Determine the number of elements within a STRUCT.
++
++ Note, we must handle nested structs.
++
++ If ty is not a STRUCT this function will return 0. */
++
++static unsigned
++element_count (ffi_type *ty)
++{
++ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
++ {
++ unsigned n;
++ unsigned elems = 0;
++ for (n = 0; ty->elements[n]; n++)
++ {
++ if (ty->elements[n]->type == FFI_TYPE_STRUCT
++ && ty->elements[n]->elements)
++ elems += element_count (ty->elements[n]);
++ else
++ elems++;
++ }
++ return elems;
++ }
++ return 0;
++}
++
++/* Test for a homogeneous floating point aggregate.
++
++ A homogeneous floating point aggregate is a homogeneous aggregate of
++ a half-, single- or double-precision floating point type with one
++ to four elements. Note that this includes nested structs of the
++ basic type. */
++
++static int
++is_hfa (ffi_type *ty)
++{
++ if (ty->type == FFI_TYPE_STRUCT
++ && ty->elements[0]
++ && is_floating_type (get_homogeneous_type (ty)))
++ {
++ unsigned n = element_count (ty);
++ return n >= 1 && n <= 4;
++ }
++ return 0;
++}
++
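For illustration of the HFA rule above (example struct definitions, not part of the patch):

struct hfa_2f       { float a, b; };             /* HFA: two single-precision elements       */
struct hfa_4d       { double a, b, c, d; };      /* HFA: four double-precision elements      */
struct hfa_nested   { struct hfa_2f p; float c; };  /* HFA: nested struct, three floats total */
struct not_hfa_mix  { float a; double b; };      /* not homogeneous                           */
struct not_hfa_cnt  { float a, b, c, d, e; };    /* homogeneous, but five elements exceeds 4  */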
++/* Test if an ffi_type is a candidate for passing in a register.
++
++ This test does not check that sufficient registers of the
++ appropriate class are actually available, merely that IFF
++ sufficient registers are available then the argument will be passed
++ in register(s).
++
++ Note that an ffi_type that is deemed to be a register candidate
++ will always be returned in registers.
++
++ Returns 1 if a register candidate else 0. */
++
++static int
++is_register_candidate (ffi_type *ty)
++{
++ switch (ty->type)
++ {
++ case FFI_TYPE_VOID:
++ case FFI_TYPE_FLOAT:
++ case FFI_TYPE_DOUBLE:
++ case FFI_TYPE_LONGDOUBLE:
++ case FFI_TYPE_UINT8:
++ case FFI_TYPE_UINT16:
++ case FFI_TYPE_UINT32:
++ case FFI_TYPE_UINT64:
++ case FFI_TYPE_POINTER:
++ case FFI_TYPE_SINT8:
++ case FFI_TYPE_SINT16:
++ case FFI_TYPE_SINT32:
++ case FFI_TYPE_INT:
++ case FFI_TYPE_SINT64:
++ return 1;
++
++ case FFI_TYPE_STRUCT:
++ if (is_hfa (ty))
++ {
++ return 1;
++ }
++ else if (ty->size > 16)
++ {
++ /* Too large. Will be replaced with a pointer to memory. The
++ pointer MAY be passed in a register, but the value will
++ not. This test specifically fails since the argument will
++ never be passed by value in registers. */
++ return 0;
++ }
++ else
++ {
++ /* Might be passed in registers depending on the number of
++ registers required. */
++ return (ty->size + 7) / 8 < N_X_ARG_REG;
++ }
++ break;
++
++ default:
++ FFI_ASSERT (0);
++ break;
++ }
++
++ return 0;
++}
++
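For illustration of the struct cases handled above (example types, not part of the patch; sizes assume an LP64 target):

struct in_regs { void *p; long n; };        /* 16 bytes: candidate, needs 2 X registers          */
struct by_ref  { long a, b, c; };           /* 24 bytes > 16: not a candidate, passed via pointer */
struct hfa_ok  { double a, b, c, d; };      /* 32 bytes, but an HFA, so still a candidate (V regs) */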
++/* Test if an ffi_type argument or result is a candidate for a vector
++ register. */
++
++static int
++is_v_register_candidate (ffi_type *ty)
++{
++ return is_floating_type (ty->type)
++ || (ty->type == FFI_TYPE_STRUCT && is_hfa (ty));
++}
++
++/* Representation of the procedure call argument marshalling
++ state.
++
++ The terse state variable names match the names used in the AARCH64
++ PCS. */
++
++struct arg_state
++{
++ unsigned ngrn; /* Next general-purpose register number. */
++ unsigned nsrn; /* Next vector register number. */
++ unsigned nsaa; /* Next stack offset. */
++};
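A worked illustration of how these fields advance (the prototype is hypothetical, not from the patch):

/* For a call such as  void f(int a, double d, void *p);
   the state advances as  a -> x0 (ngrn 0 -> 1),  d -> d0 (nsrn 0 -> 1),
   p -> x1 (ngrn 1 -> 2), and nsaa stays 0 since nothing spills to the stack. */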
++
++/* Initialize a procedure call argument marshalling state. */
++static void
++arg_init (struct arg_state *state, unsigned call_frame_size)
++{
++ state->ngrn = 0;
++ state->nsrn = 0;
++ state->nsaa = 0;
++}
++
++/* Return the number of available consecutive core argument
++ registers. */
++
++static unsigned
++available_x (struct arg_state *state)
++{
++ return N_X_ARG_REG - state->ngrn;
++}
++
++/* Return the number of available consecutive vector argument
++ registers. */
++
++static unsigned
++available_v (struct arg_state *state)
++{
++ return N_V_ARG_REG - state->nsrn;
++}
++
++static void *
++allocate_to_x (struct call_context *context, struct arg_state *state)
++{
++ FFI_ASSERT (state->ngrn < N_X_ARG_REG)
++ return get_x_addr (context, (state->ngrn)++);
++}
++
++static void *
++allocate_to_s (struct call_context *context, struct arg_state *state)
++{
++ FFI_ASSERT (state->nsrn < N_V_ARG_REG)
++ return get_s_addr (context, (state->nsrn)++);
++}
++
++static void *
++allocate_to_d (struct call_context *context, struct arg_state *state)
++{
++ FFI_ASSERT (state->nsrn < N_V_ARG_REG)
++ return get_d_addr (context, (state->nsrn)++);
++}
++
++static void *
++allocate_to_v (struct call_context *context, struct arg_state *state)
++{
++ FFI_ASSERT (state->nsrn < N_V_ARG_REG)
++ return get_v_addr (context, (state->nsrn)++);
++}
++
++/* Allocate an aligned slot on the stack and return a pointer to it. */
++static void *
++allocate_to_stack (struct arg_state *state, void *stack, unsigned alignment,
++ unsigned size)
++{
++ void *allocation;
++
++ /* Round up the NSAA to the larger of 8 or the natural
++ alignment of the argument's type. */
++ state->nsaa = ALIGN (state->nsaa, alignment);
++ state->nsaa = ALIGN (state->nsaa, alignment);
++ state->nsaa = ALIGN (state->nsaa, 8);
++
++ allocation = stack + state->nsaa;
++
++ state->nsaa += size;
++ return allocation;
++}
++
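A worked illustration of the rounding performed above (the values are made up):

/* With nsaa == 12, alignment == 8 and size == 8:  ALIGN (12, 8) == 16 and
   ALIGN (16, 8) == 16, so the slot is stack + 16 and nsaa advances to 24.
   The final ALIGN (nsaa, 8) guarantees at least 8-byte stack alignment. */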
++static void
++copy_basic_type (void *dest, void *source, unsigned short type)
++{
++ /* This is necessary to ensure that basic types are copied
++ sign extended to 64-bits as libffi expects. */
++ switch (type)
++ {
++ case FFI_TYPE_FLOAT:
++ *(float *) dest = *(float *) source;
++ break;
++ case FFI_TYPE_DOUBLE:
++ *(double *) dest = *(double *) source;
++ break;
++ case FFI_TYPE_LONGDOUBLE:
++ *(long double *) dest = *(long double *) source;
++ break;
++ case FFI_TYPE_UINT8:
++ *(ffi_arg *) dest = *(UINT8 *) source;
++ break;
++ case FFI_TYPE_SINT8:
++ *(ffi_sarg *) dest = *(SINT8 *) source;
++ break;
++ case FFI_TYPE_UINT16:
++ *(ffi_arg *) dest = *(UINT16 *) source;
++ break;
++ case FFI_TYPE_SINT16:
++ *(ffi_sarg *) dest = *(SINT16 *) source;
++ break;
++ case FFI_TYPE_UINT32:
++ *(ffi_arg *) dest = *(UINT32 *) source;
++ break;
++ case FFI_TYPE_INT:
++ case FFI_TYPE_SINT32:
++ *(ffi_sarg *) dest = *(SINT32 *) source;
++ break;
++ case FFI_TYPE_POINTER:
++ case FFI_TYPE_UINT64:
++ *(ffi_arg *) dest = *(UINT64 *) source;
++ break;
++ case FFI_TYPE_SINT64:
++ *(ffi_sarg *) dest = *(SINT64 *) source;
++ break;
++
++ default:
++ FFI_ASSERT (0);
++ }
++}
++
++static void
++copy_hfa_to_reg_or_stack (void *memory,
++ ffi_type *ty,
++ struct call_context *context,
++ unsigned char *stack,
++ struct arg_state *state)
++{
++ unsigned elems = element_count (ty);
++ if (available_v (state) < elems)
++ {
++ /* There are insufficient V registers. Further V register allocations
++ are prevented, the NSAA is adjusted (by allocate_to_stack ())
++ and the argument is copied to memory at the adjusted NSAA. */
++ state->nsrn = N_V_ARG_REG;
++ memcpy (allocate_to_stack (state, stack, ty->alignment, ty->size),
++ memory,
++ ty->size);
++ }
++ else
++ {
++ int i;
++ unsigned short type = get_homogeneous_type (ty);
++ unsigned elems = element_count (ty);
++ for (i = 0; i < elems; i++)
++ {
++ void *reg = allocate_to_v (context, state);
++ copy_basic_type (reg, memory, type);
++ memory += get_basic_type_size (type);
++ }
++ }
++}
++
++/* Either allocate an appropriate register for the argument type, or if
++ none are available, allocate a stack slot and return a pointer
++ to the allocated space. */
++
++static void *
++allocate_to_register_or_stack (struct call_context *context,
++ unsigned char *stack,
++ struct arg_state *state,
++ unsigned short type)
++{
++ size_t alignment = get_basic_type_alignment (type);
++ size_t size = alignment;
++ switch (type)
++ {
++ case FFI_TYPE_FLOAT:
++ /* This is the only case for which the allocated stack size
++ should not match the alignment of the type. */
++ size = sizeof (UINT32);
++ /* Fall through. */
++ case FFI_TYPE_DOUBLE:
++ if (state->nsrn < N_V_ARG_REG)
++ return allocate_to_d (context, state);
++ state->nsrn = N_V_ARG_REG;
++ break;
++ case FFI_TYPE_LONGDOUBLE:
++ if (state->nsrn < N_V_ARG_REG)
++ return allocate_to_v (context, state);
++ state->nsrn = N_V_ARG_REG;
++ break;
++ case FFI_TYPE_UINT8:
++ case FFI_TYPE_SINT8:
++ case FFI_TYPE_UINT16:
++ case FFI_TYPE_SINT16:
++ case FFI_TYPE_UINT32:
++ case FFI_TYPE_SINT32: