summaryrefslogtreecommitdiff
path: root/toolchain
diff options
context:
space:
mode:
authorWaldemar Brodkorb <wbx@openadk.org>2014-09-09 21:50:07 +0200
committerWaldemar Brodkorb <wbx@openadk.org>2014-09-09 21:50:07 +0200
commit4ca141ca304964c4181d50fabdb25772c0b0d493 (patch)
treef81f34f158847a70a0cdf1436f5b71fc7aea226d /toolchain
parent25bf5a778d7c77c26ddcc2b8acadf69d3d6cbf56 (diff)
parentfe3ec0b2af8d02c1d5f198c119b80a335d89eb0f (diff)
Merge branch 'master' of git+ssh://openadk.org/git/openadk
Diffstat (limited to 'toolchain')
-rw-r--r--toolchain/Makefile2
-rw-r--r--toolchain/binutils/Makefile.inc7
-rw-r--r--toolchain/binutils/patches/2.20.1/avr32.patch30797
-rw-r--r--toolchain/gcc/Makefile2
-rw-r--r--toolchain/gcc/Makefile.inc8
-rw-r--r--toolchain/gcc/patches/4.4.7/930-avr32_support.patch22706
-rw-r--r--toolchain/gcc/patches/4.4.7/931-avr32_disable_shifted_data_opt.patch32
-rw-r--r--toolchain/gcc/patches/4.4.7/933-avr32_bug_7435.patch32
-rw-r--r--toolchain/gcc/patches/4.4.7/934-avr32_bug_9675.patch21
-rw-r--r--toolchain/gcc/patches/4.4.7/cflags.patch257
10 files changed, 53862 insertions, 2 deletions
diff --git a/toolchain/Makefile b/toolchain/Makefile
index 819ad405f..096c427ba 100644
--- a/toolchain/Makefile
+++ b/toolchain/Makefile
@@ -40,7 +40,7 @@ ELF2FLT:=elf2flt-install
endif
# disable gdb for arc
-ifneq ($(ADK_LINUX_ARC),y)
+ifeq ($(ADK_LINUX_ARC)$(ADK_LINUX_AVR32),)
TARGETS+=gdb
GDB:=gdb-install
endif
diff --git a/toolchain/binutils/Makefile.inc b/toolchain/binutils/Makefile.inc
index 054f74392..75819f916 100644
--- a/toolchain/binutils/Makefile.inc
+++ b/toolchain/binutils/Makefile.inc
@@ -23,3 +23,10 @@ PKG_RELEASE:= 1
PKG_SITES:= ${MASTER_SITE_GNU:=binutils/}
DISTFILES:= ${PKG_NAME}-${PKG_VERSION}.tar.gz
endif
+ifeq ($(ADK_TOOLCHAIN_BINUTILS_2_20_1),y)
+PKG_VERSION:= 2.20.1
+PKG_MD5SUM:= a5dd5dd2d212a282cc1d4a84633e0d88
+PKG_RELEASE:= 1
+PKG_SITES:= ${MASTER_SITE_GNU:=binutils/}
+DISTFILES:= ${PKG_NAME}-${PKG_VERSION}.tar.gz
+endif
diff --git a/toolchain/binutils/patches/2.20.1/avr32.patch b/toolchain/binutils/patches/2.20.1/avr32.patch
new file mode 100644
index 000000000..646049cc0
--- /dev/null
+++ b/toolchain/binutils/patches/2.20.1/avr32.patch
@@ -0,0 +1,30797 @@
+--- a/bfd/archures.c
++++ b/bfd/archures.c
+@@ -368,6 +368,12 @@ DESCRIPTION
+ .#define bfd_mach_avr5 5
+ .#define bfd_mach_avr51 51
+ .#define bfd_mach_avr6 6
++. bfd_arch_avr32, {* Atmel AVR32 *}
++.#define bfd_mach_avr32_ap 7000
++.#define bfd_mach_avr32_uc 3000
++.#define bfd_mach_avr32_ucr1 3001
++.#define bfd_mach_avr32_ucr2 3002
++.#define bfd_mach_avr32_ucr3 3003
+ . bfd_arch_bfin, {* ADI Blackfin *}
+ .#define bfd_mach_bfin 1
+ . bfd_arch_cr16, {* National Semiconductor CompactRISC (ie CR16). *}
+@@ -465,6 +471,7 @@ extern const bfd_arch_info_type bfd_alph
+ extern const bfd_arch_info_type bfd_arc_arch;
+ extern const bfd_arch_info_type bfd_arm_arch;
+ extern const bfd_arch_info_type bfd_avr_arch;
++extern const bfd_arch_info_type bfd_avr32_arch;
+ extern const bfd_arch_info_type bfd_bfin_arch;
+ extern const bfd_arch_info_type bfd_cr16_arch;
+ extern const bfd_arch_info_type bfd_cr16c_arch;
+@@ -541,6 +548,7 @@ static const bfd_arch_info_type * const
+ &bfd_arc_arch,
+ &bfd_arm_arch,
+ &bfd_avr_arch,
++ &bfd_avr32_arch,
+ &bfd_bfin_arch,
+ &bfd_cr16_arch,
+ &bfd_cr16c_arch,
+--- a/bfd/config.bfd
++++ b/bfd/config.bfd
+@@ -347,6 +347,10 @@ case "${targ}" in
+ targ_underscore=yes
+ ;;
+
++ avr32-*-*)
++ targ_defvec=bfd_elf32_avr32_vec
++ ;;
++
+ c30-*-*aout* | tic30-*-*aout*)
+ targ_defvec=tic30_aout_vec
+ ;;
+--- a/bfd/configure.in
++++ b/bfd/configure.in
+@@ -675,6 +675,7 @@ do
+ bfd_pei_ia64_vec) tb="$tb pei-ia64.lo pepigen.lo cofflink.lo"; target_size=64 ;;
+ bfd_elf32_am33lin_vec) tb="$tb elf32-am33lin.lo elf32.lo $elf" ;;
+ bfd_elf32_avr_vec) tb="$tb elf32-avr.lo elf32.lo $elf" ;;
++ bfd_elf32_avr32_vec) tb="$tb elf32-avr32.lo elf32.lo $elf" ;;
+ bfd_elf32_bfin_vec) tb="$tb elf32-bfin.lo elf32.lo $elf" ;;
+ bfd_elf32_bfinfdpic_vec) tb="$tb elf32-bfin.lo elf32.lo $elf" ;;
+ bfd_elf32_big_generic_vec) tb="$tb elf32-gen.lo elf32.lo $elf" ;;
+--- /dev/null
++++ b/bfd/cpu-avr32.c
+@@ -0,0 +1,52 @@
++/* BFD library support routines for AVR32.
++ Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++#include "bfd.h"
++#include "sysdep.h"
++#include "libbfd.h"
++
++#define N(machine, print, default, next) \
++ { \
++ 32, /* 32 bits in a word */ \
++ 32, /* 32 bits in an address */ \
++ 8, /* 8 bits in a byte */ \
++ bfd_arch_avr32, /* architecture */ \
++ machine, /* machine */ \
++ "avr32", /* arch name */ \
++ print, /* printable name */ \
++ 1, /* section align power */ \
++ default, /* the default machine? */ \
++ bfd_default_compatible, \
++ bfd_default_scan, \
++ next, \
++ }
++
++static const bfd_arch_info_type cpu_info[] =
++{
++ N(bfd_mach_avr32_ap, "avr32:ap", FALSE, &cpu_info[1]),
++ N(bfd_mach_avr32_uc, "avr32:uc", FALSE, &cpu_info[2]),
++ N(bfd_mach_avr32_ucr1, "avr32:ucr1", FALSE, &cpu_info[3]),
++ N(bfd_mach_avr32_ucr2, "avr32:ucr2", FALSE, &cpu_info[4]),
++ N(bfd_mach_avr32_ucr3, "avr32:ucr3", FALSE, NULL),
++};
++
++const bfd_arch_info_type bfd_avr32_arch =
++ N(bfd_mach_avr32_ap, "avr32", TRUE, &cpu_info[0]);
+--- /dev/null
++++ b/bfd/elf32-avr32.c
+@@ -0,0 +1,3915 @@
++/* AVR32-specific support for 32-bit ELF.
++ Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++#include "bfd.h"
++#include "sysdep.h"
++#include "bfdlink.h"
++#include "libbfd.h"
++#include "elf-bfd.h"
++#include "elf/avr32.h"
++#include "elf32-avr32.h"
++
++#define xDEBUG
++#define xRELAX_DEBUG
++
++#ifdef DEBUG
++# define pr_debug(fmt, args...) fprintf(stderr, fmt, ##args)
++#else
++# define pr_debug(fmt, args...) do { } while (0)
++#endif
++
++#ifdef RELAX_DEBUG
++# define RDBG(fmt, args...) fprintf(stderr, fmt, ##args)
++#else
++# define RDBG(fmt, args...) do { } while (0)
++#endif
++
++/* When things go wrong, we want it to blow up, damnit! */
++#undef BFD_ASSERT
++#undef abort
++#define BFD_ASSERT(expr) \
++ do \
++ { \
++ if (!(expr)) \
++ { \
++ bfd_assert(__FILE__, __LINE__); \
++ abort(); \
++ } \
++ } \
++ while (0)
++
++/* The name of the dynamic interpreter. This is put in the .interp section. */
++#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
++
++#define AVR32_GOT_HEADER_SIZE 8
++#define AVR32_FUNCTION_STUB_SIZE 8
++
++#define ELF_R_INFO(x, y) ELF32_R_INFO(x, y)
++#define ELF_R_TYPE(x) ELF32_R_TYPE(x)
++#define ELF_R_SYM(x) ELF32_R_SYM(x)
++
++#define NOP_OPCODE 0xd703
++
++
++/* Mapping between BFD relocations and ELF relocations */
++
++static reloc_howto_type *
++bfd_elf32_bfd_reloc_type_lookup(bfd *abfd, bfd_reloc_code_real_type code);
++
++static reloc_howto_type *
++bfd_elf32_bfd_reloc_name_lookup(bfd *abfd, const char *r_name);
++
++static void
++avr32_info_to_howto (bfd *abfd, arelent *cache_ptr, Elf_Internal_Rela *dst);
++
++/* Generic HOWTO */
++#define GENH(name, align, size, bitsize, pcrel, bitpos, complain, mask) \
++ HOWTO(name, align, size, bitsize, pcrel, bitpos, \
++ complain_overflow_##complain, bfd_elf_generic_reloc, #name, \
++ FALSE, 0, mask, pcrel)
++
++static reloc_howto_type elf_avr32_howto_table[] = {
++ /* NAME ALN SZ BSZ PCREL BP COMPLAIN MASK */
++ GENH(R_AVR32_NONE, 0, 0, 0, FALSE, 0, dont, 0x00000000),
++
++ GENH(R_AVR32_32, 0, 2, 32, FALSE, 0, dont, 0xffffffff),
++ GENH(R_AVR32_16, 0, 1, 16, FALSE, 0, bitfield, 0x0000ffff),
++ GENH(R_AVR32_8, 0, 0, 8, FALSE, 0, bitfield, 0x000000ff),
++ GENH(R_AVR32_32_PCREL, 0, 2, 32, TRUE, 0, signed, 0xffffffff),
++ GENH(R_AVR32_16_PCREL, 0, 1, 16, TRUE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_8_PCREL, 0, 0, 8, TRUE, 0, signed, 0x000000ff),
++
++ /* Difference between two symbol (sym2 - sym1). The reloc encodes
++ the value of sym1. The field contains the difference before any
++ relaxing is done. */
++ GENH(R_AVR32_DIFF32, 0, 2, 32, FALSE, 0, dont, 0xffffffff),
++ GENH(R_AVR32_DIFF16, 0, 1, 16, FALSE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_DIFF8, 0, 0, 8, FALSE, 0, signed, 0x000000ff),
++
++ GENH(R_AVR32_GOT32, 0, 2, 32, FALSE, 0, signed, 0xffffffff),
++ GENH(R_AVR32_GOT16, 0, 1, 16, FALSE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_GOT8, 0, 0, 8, FALSE, 0, signed, 0x000000ff),
++
++ GENH(R_AVR32_21S, 0, 2, 21, FALSE, 0, signed, 0x1e10ffff),
++ GENH(R_AVR32_16U, 0, 2, 16, FALSE, 0, unsigned, 0x0000ffff),
++ GENH(R_AVR32_16S, 0, 2, 16, FALSE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_8S, 0, 1, 8, FALSE, 4, signed, 0x00000ff0),
++ GENH(R_AVR32_8S_EXT, 0, 2, 8, FALSE, 0, signed, 0x000000ff),
++
++ GENH(R_AVR32_22H_PCREL, 1, 2, 21, TRUE, 0, signed, 0x1e10ffff),
++ GENH(R_AVR32_18W_PCREL, 2, 2, 16, TRUE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_16B_PCREL, 0, 2, 16, TRUE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_16N_PCREL, 0, 2, 16, TRUE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_14UW_PCREL, 2, 2, 12, TRUE, 0, unsigned, 0x0000f0ff),
++ GENH(R_AVR32_11H_PCREL, 1, 1, 10, TRUE, 4, signed, 0x00000ff3),
++ GENH(R_AVR32_10UW_PCREL, 2, 2, 8, TRUE, 0, unsigned, 0x000000ff),
++ GENH(R_AVR32_9H_PCREL, 1, 1, 8, TRUE, 4, signed, 0x00000ff0),
++ GENH(R_AVR32_9UW_PCREL, 2, 1, 7, TRUE, 4, unsigned, 0x000007f0),
++
++ GENH(R_AVR32_HI16, 16, 2, 16, FALSE, 0, dont, 0x0000ffff),
++ GENH(R_AVR32_LO16, 0, 2, 16, FALSE, 0, dont, 0x0000ffff),
++
++ GENH(R_AVR32_GOTPC, 0, 2, 32, FALSE, 0, dont, 0xffffffff),
++ GENH(R_AVR32_GOTCALL, 2, 2, 21, FALSE, 0, signed, 0x1e10ffff),
++ GENH(R_AVR32_LDA_GOT, 2, 2, 21, FALSE, 0, signed, 0x1e10ffff),
++ GENH(R_AVR32_GOT21S, 0, 2, 21, FALSE, 0, signed, 0x1e10ffff),
++ GENH(R_AVR32_GOT18SW, 2, 2, 16, FALSE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_GOT16S, 0, 2, 16, FALSE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_GOT7UW, 2, 1, 5, FALSE, 4, unsigned, 0x000001f0),
++
++ GENH(R_AVR32_32_CPENT, 0, 2, 32, FALSE, 0, dont, 0xffffffff),
++ GENH(R_AVR32_CPCALL, 2, 2, 16, TRUE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_16_CP, 0, 2, 16, TRUE, 0, signed, 0x0000ffff),
++ GENH(R_AVR32_9W_CP, 2, 1, 7, TRUE, 4, unsigned, 0x000007f0),
++
++ GENH(R_AVR32_RELATIVE, 0, 2, 32, FALSE, 0, signed, 0xffffffff),
++ GENH(R_AVR32_GLOB_DAT, 0, 2, 32, FALSE, 0, dont, 0xffffffff),
++ GENH(R_AVR32_JMP_SLOT, 0, 2, 32, FALSE, 0, dont, 0xffffffff),
++
++ GENH(R_AVR32_ALIGN, 0, 1, 0, FALSE, 0, unsigned, 0x00000000),
++
++ GENH(R_AVR32_15S, 2, 2, 15, FALSE, 0, signed, 0x00007fff),
++};
++
++struct elf_reloc_map
++{
++ bfd_reloc_code_real_type bfd_reloc_val;
++ unsigned char elf_reloc_val;
++};
++
++static const struct elf_reloc_map avr32_reloc_map[] =
++{
++ { BFD_RELOC_NONE, R_AVR32_NONE },
++
++ { BFD_RELOC_32, R_AVR32_32 },
++ { BFD_RELOC_16, R_AVR32_16 },
++ { BFD_RELOC_8, R_AVR32_8 },
++ { BFD_RELOC_32_PCREL, R_AVR32_32_PCREL },
++ { BFD_RELOC_16_PCREL, R_AVR32_16_PCREL },
++ { BFD_RELOC_8_PCREL, R_AVR32_8_PCREL },
++ { BFD_RELOC_AVR32_DIFF32, R_AVR32_DIFF32 },
++ { BFD_RELOC_AVR32_DIFF16, R_AVR32_DIFF16 },
++ { BFD_RELOC_AVR32_DIFF8, R_AVR32_DIFF8 },
++ { BFD_RELOC_AVR32_GOT32, R_AVR32_GOT32 },
++ { BFD_RELOC_AVR32_GOT16, R_AVR32_GOT16 },
++ { BFD_RELOC_AVR32_GOT8, R_AVR32_GOT8 },
++
++ { BFD_RELOC_AVR32_21S, R_AVR32_21S },
++ { BFD_RELOC_AVR32_16U, R_AVR32_16U },
++ { BFD_RELOC_AVR32_16S, R_AVR32_16S },
++ { BFD_RELOC_AVR32_SUB5, R_AVR32_16S },
++ { BFD_RELOC_AVR32_8S_EXT, R_AVR32_8S_EXT },
++ { BFD_RELOC_AVR32_8S, R_AVR32_8S },
++
++ { BFD_RELOC_AVR32_22H_PCREL, R_AVR32_22H_PCREL },
++ { BFD_RELOC_AVR32_18W_PCREL, R_AVR32_18W_PCREL },
++ { BFD_RELOC_AVR32_16B_PCREL, R_AVR32_16B_PCREL },
++ { BFD_RELOC_AVR32_16N_PCREL, R_AVR32_16N_PCREL },
++ { BFD_RELOC_AVR32_11H_PCREL, R_AVR32_11H_PCREL },
++ { BFD_RELOC_AVR32_10UW_PCREL, R_AVR32_10UW_PCREL },
++ { BFD_RELOC_AVR32_9H_PCREL, R_AVR32_9H_PCREL },
++ { BFD_RELOC_AVR32_9UW_PCREL, R_AVR32_9UW_PCREL },
++
++ { BFD_RELOC_HI16, R_AVR32_HI16 },
++ { BFD_RELOC_LO16, R_AVR32_LO16 },
++
++ { BFD_RELOC_AVR32_GOTPC, R_AVR32_GOTPC },
++ { BFD_RELOC_AVR32_GOTCALL, R_AVR32_GOTCALL },
++ { BFD_RELOC_AVR32_LDA_GOT, R_AVR32_LDA_GOT },
++ { BFD_RELOC_AVR32_GOT21S, R_AVR32_GOT21S },
++ { BFD_RELOC_AVR32_GOT18SW, R_AVR32_GOT18SW },
++ { BFD_RELOC_AVR32_GOT16S, R_AVR32_GOT16S },
++ /* GOT7UW should never be generated by the assembler */
++
++ { BFD_RELOC_AVR32_32_CPENT, R_AVR32_32_CPENT },
++ { BFD_RELOC_AVR32_CPCALL, R_AVR32_CPCALL },
++ { BFD_RELOC_AVR32_16_CP, R_AVR32_16_CP },
++ { BFD_RELOC_AVR32_9W_CP, R_AVR32_9W_CP },
++
++ { BFD_RELOC_AVR32_ALIGN, R_AVR32_ALIGN },
++
++ { BFD_RELOC_AVR32_15S, R_AVR32_15S },
++};
++
++static reloc_howto_type *
++bfd_elf32_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ bfd_reloc_code_real_type code)
++{
++ unsigned int i;
++
++ for (i = 0; i < sizeof(avr32_reloc_map) / sizeof(struct elf_reloc_map); i++)
++ {
++ if (avr32_reloc_map[i].bfd_reloc_val == code)
++ return &elf_avr32_howto_table[avr32_reloc_map[i].elf_reloc_val];
++ }
++
++ return NULL;
++}
++
++static reloc_howto_type *
++bfd_elf32_bfd_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ const char *r_name)
++{
++ unsigned int i;
++
++ for (i = 0;
++ i < sizeof (elf_avr32_howto_table) / sizeof (elf_avr32_howto_table[0]);
++ i++)
++ if (elf_avr32_howto_table[i].name != NULL
++ && strcasecmp (elf_avr32_howto_table[i].name, r_name) == 0)
++ return &elf_avr32_howto_table[i];
++
++ return NULL;
++}
++
++/* Set the howto pointer for an AVR32 ELF reloc. */
++static void
++avr32_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
++ arelent *cache_ptr,
++ Elf_Internal_Rela *dst)
++{
++ unsigned int r_type;
++
++ r_type = ELF32_R_TYPE (dst->r_info);
++ BFD_ASSERT (r_type < (unsigned int) R_AVR32_max);
++ cache_ptr->howto = &elf_avr32_howto_table[r_type];
++}
++
++
++/* AVR32 ELF linker hash table and associated hash entries. */
++
++static struct bfd_hash_entry *
++avr32_elf_link_hash_newfunc(struct bfd_hash_entry *entry,
++ struct bfd_hash_table *table,
++ const char *string);
++static void
++avr32_elf_copy_indirect_symbol(struct bfd_link_info *info,
++ struct elf_link_hash_entry *dir,
++ struct elf_link_hash_entry *ind);
++static struct bfd_link_hash_table *
++avr32_elf_link_hash_table_create(bfd *abfd);
++
++/*
++ Try to limit memory usage to something reasonable when sorting the
++ GOT. If just a couple of entries end up getting more references
++ than this, it won't affect performance at all, but if there are many
++ of them, we could end up with the wrong symbols being assigned the
++ first GOT entries.
++*/
++#define MAX_NR_GOT_HOLES 2048
++
++/*
++ AVR32 GOT entry. We need to keep track of refcounts and offsets
++ simultaneously, since we need the offsets during relaxation, and we
++ also want to be able to drop GOT entries during relaxation. In
++ addition to this, we want to keep the list of GOT entries sorted so
++ that we can keep the most-used entries at the lowest offsets.
++*/
++struct got_entry
++{
++ struct got_entry *next;
++ struct got_entry **pprev;
++ int refcount;
++ bfd_signed_vma offset;
++};
++
++struct elf_avr32_link_hash_entry
++{
++ struct elf_link_hash_entry root;
++
++ /* Number of runtime relocations against this symbol. */
++ unsigned int possibly_dynamic_relocs;
++
++ /* If there are anything but R_AVR32_GOT18 relocations against this
++ symbol, it means that someone may be taking the address of the
++ function, and we should therefore not create a stub. */
++ bfd_boolean no_fn_stub;
++
++ /* If there is a R_AVR32_32 relocation in a read-only section
++ against this symbol, we could be in trouble. If we're linking a
++ shared library or this symbol is defined in one, it means we must
++ emit a run-time reloc for it and that's not allowed in read-only
++ sections. */
++ asection *readonly_reloc_sec;
++ bfd_vma readonly_reloc_offset;
++
++ /* Record which frag (if any) contains the symbol. This is used
++ during relaxation in order to avoid having to update all symbols
++ whenever we move something. For local symbols, this information
++ is in the local_sym_frag member of struct elf_obj_tdata. */
++ struct fragment *sym_frag;
++};
++#define avr32_elf_hash_entry(ent) ((struct elf_avr32_link_hash_entry *)(ent))
++
++struct elf_avr32_link_hash_table
++{
++ struct elf_link_hash_table root;
++
++ /* Shortcuts to get to dynamic linker sections. */
++ asection *sgot;
++ asection *srelgot;
++ asection *sstub;
++
++ /* We use a variation of Pigeonhole Sort to sort the GOT. After the
++ initial refcounts have been determined, we initialize
++ nr_got_holes to the highest refcount ever seen and allocate an
++ array of nr_got_holes entries for got_hole. Each GOT entry is
++ then stored in this array at the index given by its refcount.
++
++ When a GOT entry has its refcount decremented during relaxation,
++ it is moved to a lower index in the got_hole array.
++ */
++ struct got_entry **got_hole;
++ int nr_got_holes;
++
++ /* Dynamic relocations to local symbols. Only used when linking a
++ shared library and -Bsymbolic is not given. */
++ unsigned int local_dynamic_relocs;
++
++ bfd_boolean relocations_analyzed;
++ bfd_boolean symbols_adjusted;
++ bfd_boolean repeat_pass;
++ bfd_boolean direct_data_refs;
++ unsigned int relax_iteration;
++ unsigned int relax_pass;
++};
++#define avr32_elf_hash_table(p) \
++ ((struct elf_avr32_link_hash_table *)((p)->hash))
++
++static struct bfd_hash_entry *
++avr32_elf_link_hash_newfunc(struct bfd_hash_entry *entry,
++ struct bfd_hash_table *table,
++ const char *string)
++{
++ struct elf_avr32_link_hash_entry *ret = avr32_elf_hash_entry(entry);
++
++ /* Allocate the structure if it hasn't already been allocated by a
++ subclass */
++ if (ret == NULL)
++ ret = (struct elf_avr32_link_hash_entry *)
++ bfd_hash_allocate(table, sizeof(struct elf_avr32_link_hash_entry));
++
++ if (ret == NULL)
++ return NULL;
++
++ memset(ret, 0, sizeof(struct elf_avr32_link_hash_entry));
++
++ /* Give the superclass a chance */
++ ret = (struct elf_avr32_link_hash_entry *)
++ _bfd_elf_link_hash_newfunc((struct bfd_hash_entry *)ret, table, string);
++
++ return (struct bfd_hash_entry *)ret;
++}
++
++/* Copy data from an indirect symbol to its direct symbol, hiding the
++ old indirect symbol. Process additional relocation information.
++ Also called for weakdefs, in which case we just let
++ _bfd_elf_link_hash_copy_indirect copy the flags for us. */
++
++static void
++avr32_elf_copy_indirect_symbol(struct bfd_link_info *info,
++ struct elf_link_hash_entry *dir,
++ struct elf_link_hash_entry *ind)
++{
++ struct elf_avr32_link_hash_entry *edir, *eind;
++
++ _bfd_elf_link_hash_copy_indirect (info, dir, ind);
++
++ if (ind->root.type != bfd_link_hash_indirect)
++ return;
++
++ edir = (struct elf_avr32_link_hash_entry *)dir;
++ eind = (struct elf_avr32_link_hash_entry *)ind;
++
++ edir->possibly_dynamic_relocs += eind->possibly_dynamic_relocs;
++ edir->no_fn_stub = edir->no_fn_stub || eind->no_fn_stub;
++}
++
++static struct bfd_link_hash_table *
++avr32_elf_link_hash_table_create(bfd *abfd)
++{
++ struct elf_avr32_link_hash_table *ret;
++
++ ret = bfd_zmalloc(sizeof(*ret));
++ if (ret == NULL)
++ return NULL;
++
++ if (! _bfd_elf_link_hash_table_init(&ret->root, abfd,
++ avr32_elf_link_hash_newfunc,
++ sizeof (struct elf_avr32_link_hash_entry)))
++ {
++ free(ret);
++ return NULL;
++ }
++
++ /* Prevent the BFD core from creating bogus got_entry pointers */
++ ret->root.init_got_refcount.glist = NULL;
++ ret->root.init_plt_refcount.glist = NULL;
++ ret->root.init_got_offset.glist = NULL;
++ ret->root.init_plt_offset.glist = NULL;
++
++ return &ret->root.root;
++}
++
++
++/* Initial analysis and creation of dynamic sections and symbols */
++
++static asection *
++create_dynamic_section(bfd *dynobj, const char *name, flagword flags,
++ unsigned int align_power);
++static struct elf_link_hash_entry *
++create_dynamic_symbol(bfd *dynobj, struct bfd_link_info *info,
++ const char *name, asection *sec,
++ bfd_vma offset);
++static bfd_boolean
++avr32_elf_create_got_section (bfd *dynobj, struct bfd_link_info *info);
++static bfd_boolean
++avr32_elf_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info);
++static bfd_boolean
++avr32_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec,
++ const Elf_Internal_Rela *relocs);
++static bfd_boolean
++avr32_elf_adjust_dynamic_symbol(struct bfd_link_info *info,
++ struct elf_link_hash_entry *h);
++
++static asection *
++create_dynamic_section(bfd *dynobj, const char *name, flagword flags,
++ unsigned int align_power)
++{
++ asection *sec;
++
++ sec = bfd_make_section(dynobj, name);
++ if (!sec
++ || !bfd_set_section_flags(dynobj, sec, flags)
++ || !bfd_set_section_alignment(dynobj, sec, align_power))
++ return NULL;
++
++ return sec;
++}
++
++static struct elf_link_hash_entry *
++create_dynamic_symbol(bfd *dynobj, struct bfd_link_info *info,
++ const char *name, asection *sec,
++ bfd_vma offset)
++{
++ struct bfd_link_hash_entry *bh = NULL;
++ struct elf_link_hash_entry *h;
++ const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
++
++ if (!(_bfd_generic_link_add_one_symbol
++ (info, dynobj, name, BSF_GLOBAL, sec, offset, NULL, FALSE,
++ bed->collect, &bh)))
++ return NULL;
++
++ h = (struct elf_link_hash_entry *)bh;
++ h->def_regular = 1;
++ h->type = STT_OBJECT;
++ h->other = STV_HIDDEN;
++
++ return h;
++}
++
++static bfd_boolean
++avr32_elf_create_got_section (bfd *dynobj, struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ flagword flags;
++ const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
++
++ htab = avr32_elf_hash_table(info);
++ flags = bed->dynamic_sec_flags;
++
++ if (htab->sgot)
++ return TRUE;
++
++ htab->sgot = create_dynamic_section(dynobj, ".got", flags, 2);
++ if (!htab->srelgot)
++ htab->srelgot = create_dynamic_section(dynobj, ".rela.got",
++ flags | SEC_READONLY, 2);
++
++ if (!htab->sgot || !htab->srelgot)
++ return FALSE;
++
++ htab->root.hgot = create_dynamic_symbol(dynobj, info, "_GLOBAL_OFFSET_TABLE_",
++ htab->sgot, 0);
++ if (!htab->root.hgot)
++ return FALSE;
++
++ /* Make room for the GOT header */
++ htab->sgot->size += bed->got_header_size;
++
++ return TRUE;
++}
++
++/* (1) Create all dynamic (i.e. linker generated) sections that we may
++ need during the link */
++
++static bfd_boolean
++avr32_elf_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ flagword flags;
++ const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
++
++ pr_debug("(1) create dynamic sections\n");
++
++ htab = avr32_elf_hash_table(info);
++ flags = bed->dynamic_sec_flags;
++
++ if (!avr32_elf_create_got_section (dynobj, info))
++ return FALSE;
++
++ if (!htab->sstub)
++ htab->sstub = create_dynamic_section(dynobj, ".stub",
++ flags | SEC_READONLY | SEC_CODE, 2);
++
++ if (!htab->sstub)
++ return FALSE;
++
++ return TRUE;
++}
++
++/* (2) Go through all the relocs and count any potential GOT- or
++ PLT-references to each symbol */
++
++static bfd_boolean
++avr32_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec,
++ const Elf_Internal_Rela *relocs)
++{
++ Elf_Internal_Shdr *symtab_hdr;
++ struct elf_avr32_link_hash_table *htab;
++ struct elf_link_hash_entry **sym_hashes;
++ const Elf_Internal_Rela *rel, *rel_end;
++ struct got_entry **local_got_ents;
++ struct got_entry *got;
++ const struct elf_backend_data *bed = get_elf_backend_data (abfd);
++ asection *sgot;
++ bfd *dynobj;
++
++ pr_debug("(2) check relocs for %s:<%s> (size 0x%lx)\n",
++ abfd->filename, sec->name, sec->size);
++
++ if (info->relocatable)
++ return TRUE;
++
++ dynobj = elf_hash_table(info)->dynobj;
++ symtab_hdr = &elf_tdata(abfd)->symtab_hdr;
++ sym_hashes = elf_sym_hashes(abfd);
++ htab = avr32_elf_hash_table(info);
++ local_got_ents = elf_local_got_ents(abfd);
++ sgot = htab->sgot;
++
++ rel_end = relocs + sec->reloc_count;
++ for (rel = relocs; rel < rel_end; rel++)
++ {
++ unsigned long r_symndx, r_type;
++ struct elf_avr32_link_hash_entry *h;
++
++ r_symndx = ELF32_R_SYM(rel->r_info);
++ r_type = ELF32_R_TYPE(rel->r_info);
++
++ /* Local symbols use local_got_ents, while others store the same
++ information in the hash entry */
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ pr_debug(" (2a) processing local symbol %lu\n", r_symndx);
++ h = NULL;
++ }
++ else
++ {
++ h = (struct elf_avr32_link_hash_entry *)
++ sym_hashes[r_symndx - symtab_hdr->sh_info];
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_avr32_link_hash_entry *)h->root.root.u.i.link;
++ pr_debug(" (2a) processing symbol %s\n", h->root.root.root.string);
++ }
++
++ /* Some relocs require special sections to be created. */
++ switch (r_type)
++ {
++ case R_AVR32_GOT32:
++ case R_AVR32_GOT16:
++ case R_AVR32_GOT8:
++ case R_AVR32_GOT21S:
++ case R_AVR32_GOT18SW:
++ case R_AVR32_GOT16S:
++ case R_AVR32_GOT7UW:
++ case R_AVR32_LDA_GOT:
++ case R_AVR32_GOTCALL:
++ if (rel->r_addend)
++ {
++ if (info->callbacks->reloc_dangerous
++ (info, _("Non-zero addend on GOT-relative relocation"),
++ abfd, sec, rel->r_offset) == FALSE)
++ return FALSE;
++ }
++ /* fall through */
++ case R_AVR32_GOTPC:
++ if (dynobj == NULL)
++ elf_hash_table(info)->dynobj = dynobj = abfd;
++ if (sgot == NULL && !avr32_elf_create_got_section(dynobj, info))
++ return FALSE;
++ break;
++ case R_AVR32_32:
++ /* We may need to create .rela.dyn later on. */
++ if (dynobj == NULL
++ && (info->shared || h != NULL)
++ && (sec->flags & SEC_ALLOC))
++ elf_hash_table(info)->dynobj = dynobj = abfd;
++ break;
++ }
++
++ if (h != NULL && r_type != R_AVR32_GOT18SW)
++ h->no_fn_stub = TRUE;
++
++ switch (r_type)
++ {
++ case R_AVR32_GOT32:
++ case R_AVR32_GOT16:
++ case R_AVR32_GOT8:
++ case R_AVR32_GOT21S:
++ case R_AVR32_GOT18SW:
++ case R_AVR32_GOT16S:
++ case R_AVR32_GOT7UW:
++ case R_AVR32_LDA_GOT:
++ case R_AVR32_GOTCALL:
++ if (h != NULL)
++ {
++ got = h->root.got.glist;
++ if (!got)
++ {
++ got = bfd_zalloc(abfd, sizeof(struct got_entry));
++ if (!got)
++ return FALSE;
++ h->root.got.glist = got;
++ }
++ }
++ else
++ {
++ if (!local_got_ents)
++ {
++ bfd_size_type size;
++ bfd_size_type i;
++ struct got_entry *tmp_entry;
++
++ size = symtab_hdr->sh_info;
++ size *= sizeof(struct got_entry *) + sizeof(struct got_entry);
++ local_got_ents = bfd_zalloc(abfd, size);
++ if (!local_got_ents)
++ return FALSE;
++
++ elf_local_got_ents(abfd) = local_got_ents;
++
++ tmp_entry = (struct got_entry *)(local_got_ents
++ + symtab_hdr->sh_info);
++ for (i = 0; i < symtab_hdr->sh_info; i++)
++ local_got_ents[i] = &tmp_entry[i];
++ }
++
++ got = local_got_ents[r_symndx];
++ }
++
++ got->refcount++;
++ if (got->refcount > htab->nr_got_holes)
++ htab->nr_got_holes = got->refcount;
++ break;
++
++ case R_AVR32_32:
++ if ((info->shared || h != NULL)
++ && (sec->flags & SEC_ALLOC))
++ {
++ if (htab->srelgot == NULL)
++ {
++ htab->srelgot = create_dynamic_section(dynobj, ".rela.got",
++ bed->dynamic_sec_flags
++ | SEC_READONLY, 2);
++ if (htab->srelgot == NULL)
++ return FALSE;
++ }
++
++ if (sec->flags & SEC_READONLY
++ && !h->readonly_reloc_sec)
++ {
++ h->readonly_reloc_sec = sec;
++ h->readonly_reloc_offset = rel->r_offset;
++ }
++
++ if (h != NULL)
++ {
++ pr_debug("Non-GOT reference to symbol %s\n",
++ h->root.root.root.string);
++ h->possibly_dynamic_relocs++;
++ }
++ else
++ {
++ pr_debug("Non-GOT reference to local symbol %lu\n",
++ r_symndx);
++ htab->local_dynamic_relocs++;
++ }
++ }
++
++ break;
++
++ /* TODO: GNU_VTINHERIT and GNU_VTENTRY */
++ }
++ }
++
++ return TRUE;
++}
++
++/* (3) Adjust a symbol defined by a dynamic object and referenced by a
++ regular object. The current definition is in some section of the
++ dynamic object, but we're not including those sections. We have to
++ change the definition to something the rest of the link can
++ understand. */
++
++static bfd_boolean
++avr32_elf_adjust_dynamic_symbol(struct bfd_link_info *info,
++ struct elf_link_hash_entry *h)
++{
++ struct elf_avr32_link_hash_table *htab;
++ struct elf_avr32_link_hash_entry *havr;
++ bfd *dynobj;
++
++ pr_debug("(3) adjust dynamic symbol %s\n", h->root.root.string);
++
++ htab = avr32_elf_hash_table(info);
++ havr = (struct elf_avr32_link_hash_entry *)h;
++ dynobj = elf_hash_table(info)->dynobj;
++
++ /* Make sure we know what is going on here. */
++ BFD_ASSERT (dynobj != NULL
++ && (h->u.weakdef != NULL
++ || (h->def_dynamic
++ && h->ref_regular
++ && !h->def_regular)));
++
++ /* We don't want dynamic relocations in read-only sections. */
++ if (havr->readonly_reloc_sec)
++ {
++ if (info->callbacks->reloc_dangerous
++ (info, _("dynamic relocation in read-only section"),
++ havr->readonly_reloc_sec->owner, havr->readonly_reloc_sec,
++ havr->readonly_reloc_offset) == FALSE)
++ return FALSE;
++ }
++
++ /* If this is a function, create a stub if possible and set the
++ symbol to the stub location. */
++ if (0 && !havr->no_fn_stub)
++ {
++ if (!h->def_regular)
++ {
++ asection *s = htab->sstub;
++
++ BFD_ASSERT(s != NULL);
++
++ h->root.u.def.section = s;
++ h->root.u.def.value = s->size;
++ h->plt.offset = s->size;
++ s->size += AVR32_FUNCTION_STUB_SIZE;
++
++ return TRUE;
++ }
++ }
++ else if (h->type == STT_FUNC)
++ {
++ /* This will set the entry for this symbol in the GOT to 0, and
++ the dynamic linker will take care of this. */
++ h->root.u.def.value = 0;
++ return TRUE;
++ }
++
++ /* If this is a weak symbol, and there is a real definition, the
++ processor independent code will have arranged for us to see the
++ real definition first, and we can just use the same value. */
++ if (h->u.weakdef != NULL)
++ {
++ BFD_ASSERT(h->u.weakdef->root.type == bfd_link_hash_defined
++ || h->u.weakdef->root.type == bfd_link_hash_defweak);
++ h->root.u.def.section = h->u.weakdef->root.u.def.section;
++ h->root.u.def.value = h->u.weakdef->root.u.def.value;
++ return TRUE;
++ }
++
++ /* This is a reference to a symbol defined by a dynamic object which
++ is not a function. */
++
++ return TRUE;
++}
++
++
++/* Garbage-collection of unused sections */
++
++static asection *
++avr32_elf_gc_mark_hook(asection *sec,
++ struct bfd_link_info *info ATTRIBUTE_UNUSED,
++ Elf_Internal_Rela *rel,
++ struct elf_link_hash_entry *h,
++ Elf_Internal_Sym *sym)
++{
++ if (h)
++ {
++ switch (ELF32_R_TYPE(rel->r_info))
++ {
++ /* TODO: VTINHERIT/VTENTRY */
++ default:
++ switch (h->root.type)
++ {
++ case bfd_link_hash_defined:
++ case bfd_link_hash_defweak:
++ return h->root.u.def.section;
++
++ case bfd_link_hash_common:
++ return h->root.u.c.p->section;
++
++ default:
++ break;
++ }
++ }
++ }
++ else
++ return bfd_section_from_elf_index(sec->owner, sym->st_shndx);
++
++ return NULL;
++}
++
++/* Update the GOT entry reference counts for the section being removed. */
++static bfd_boolean
++avr32_elf_gc_sweep_hook(bfd *abfd,
++ struct bfd_link_info *info ATTRIBUTE_UNUSED,
++ asection *sec,
++ const Elf_Internal_Rela *relocs)
++{
++ Elf_Internal_Shdr *symtab_hdr;
++ struct elf_avr32_link_hash_entry **sym_hashes;
++ struct got_entry **local_got_ents;
++ const Elf_Internal_Rela *rel, *relend;
++
++ if (!(sec->flags & SEC_ALLOC))
++ return TRUE;
++
++ symtab_hdr = &elf_tdata(abfd)->symtab_hdr;
++ sym_hashes = (struct elf_avr32_link_hash_entry **)elf_sym_hashes(abfd);
++ local_got_ents = elf_local_got_ents(abfd);
++
++ relend = relocs + sec->reloc_count;
++ for (rel = relocs; rel < relend; rel++)
++ {
++ unsigned long r_symndx;
++ unsigned int r_type;
++ struct elf_avr32_link_hash_entry *h = NULL;
++
++ r_symndx = ELF32_R_SYM(rel->r_info);
++ if (r_symndx >= symtab_hdr->sh_info)
++ {
++ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++ while (h->root.root.type == bfd_link_hash_indirect
++ || h->root.root.type == bfd_link_hash_warning)
++ h = (struct elf_avr32_link_hash_entry *)h->root.root.u.i.link;
++ }
++
++ r_type = ELF32_R_TYPE(rel->r_info);
++
++ switch (r_type)
++ {
++ case R_AVR32_GOT32:
++ case R_AVR32_GOT16:
++ case R_AVR32_GOT8:
++ case R_AVR32_GOT21S:
++ case R_AVR32_GOT18SW:
++ case R_AVR32_GOT16S:
++ case R_AVR32_GOT7UW:
++ case R_AVR32_LDA_GOT:
++ case R_AVR32_GOTCALL:
++ if (h)
++ h->root.got.glist->refcount--;
++ else
++ local_got_ents[r_symndx]->refcount--;
++ break;
++
++ case R_AVR32_32:
++ if (info->shared || h)
++ {
++ if (h)
++ h->possibly_dynamic_relocs--;
++ else
++ avr32_elf_hash_table(info)->local_dynamic_relocs--;
++ }
++
++ default:
++ break;
++ }
++ }
++
++ return TRUE;
++}
++
++/* Sizing and refcounting of dynamic sections */
++
++static void
++insert_got_entry(struct elf_avr32_link_hash_table *htab, struct got_entry *got);
++static void
++unref_got_entry(struct elf_avr32_link_hash_table *htab, struct got_entry *got);
++static void
++ref_got_entry(struct elf_avr32_link_hash_table *htab, struct got_entry *got);
++static bfd_boolean
++assign_got_offsets(struct elf_avr32_link_hash_table *htab);
++static bfd_boolean
++allocate_dynrelocs(struct elf_link_hash_entry *h, void *_info);
++static bfd_boolean
++avr32_elf_size_dynamic_sections (bfd *output_bfd,
++ struct bfd_link_info *info);
++
++static void
++insert_got_entry(struct elf_avr32_link_hash_table *htab, struct got_entry *got)
++{
++ /* Any entries with got_refcount > htab->nr_got_holes end up in the
++ * last pigeonhole without any sorting. We expect the number of such
++ * entries to be small, so it is very unlikely to affect
++ * performance. */
++ int entry = got->refcount;
++
++ if (entry > htab->nr_got_holes)
++ entry = htab->nr_got_holes;
++
++ got->pprev = &htab->got_hole[entry];
++ got->next = htab->got_hole[entry];
++
++ if (got->next)
++ got->next->pprev = &got->next;
++
++ htab->got_hole[entry] = got;
++}
++
++/* Decrement the refcount of a GOT entry and update its position in
++ the pigeonhole array. */
++static void
++unref_got_entry(struct elf_avr32_link_hash_table *htab, struct got_entry *got)
++{
++ BFD_ASSERT(got->refcount > 0);
++
++ if (got->next)
++ got->next->pprev = got->pprev;
++
++ *(got->pprev) = got->next;
++ got->refcount--;
++ insert_got_entry(htab, got);
++}
++
++static void
++ref_got_entry(struct elf_avr32_link_hash_table *htab, struct got_entry *got)
++{
++ if (got->next)
++ got->next->pprev = got->pprev;
++
++ *(got->pprev) = got->next;
++ got->refcount++;
++ insert_got_entry(htab, got);
++
++ BFD_ASSERT(got->refcount > 0);
++}
++
++/* Assign offsets to all GOT entries we intend to keep. The entries
++ that are referenced most often are placed at low offsets so that we
++ can use compact instructions as much as possible.
++
++ Returns TRUE if any offsets or the total size of the GOT changed. */
++
++static bfd_boolean
++assign_got_offsets(struct elf_avr32_link_hash_table *htab)
++{
++ struct got_entry *got;
++ bfd_size_type got_size = 0;
++ bfd_boolean changed = FALSE;
++ bfd_signed_vma offset;
++ int i;
++
++ /* The GOT header provides the address of the DYNAMIC segment, so
++ we need that even if the GOT is otherwise empty. */
++ if (htab->root.dynamic_sections_created)
++ got_size = AVR32_GOT_HEADER_SIZE;
++
++ for (i = htab->nr_got_holes; i > 0; i--)
++ {
++ got = htab->got_hole[i];
++ while (got)
++ {
++ if (got->refcount > 0)
++ {
++ offset = got_size;
++ if (got->offset != offset)
++ {
++ RDBG("GOT offset changed: %ld -> %ld\n",
++ got->offset, offset);
++ changed = TRUE;
++ }
++ got->offset = offset;
++ got_size += 4;
++ }
++ got = got->next;
++ }
++ }
++
++ if (htab->sgot->size != got_size)
++ {
++ RDBG("GOT size changed: %lu -> %lu\n", htab->sgot->size,
++ got_size);
++ changed = TRUE;
++ }
++ htab->sgot->size = got_size;
++
++ RDBG("assign_got_offsets: total size %lu (%s)\n",
++ got_size, changed ? "changed" : "no change");
++
++ return changed;
++}
++
++static bfd_boolean
++allocate_dynrelocs(struct elf_link_hash_entry *h, void *_info)
++{
++ struct bfd_link_info *info = _info;
++ struct elf_avr32_link_hash_table *htab;
++ struct elf_avr32_link_hash_entry *havr;
++ struct got_entry *got;
++
++ pr_debug(" (4b) allocate_dynrelocs: %s\n", h->root.root.string);
++
++ if (h->root.type == bfd_link_hash_indirect)
++ return TRUE;
++
++ if (h->root.type == bfd_link_hash_warning)
++ /* When warning symbols are created, they **replace** the "real"
++ entry in the hash table, thus we never get to see the real
++ symbol in a hash traversal. So look at it now. */
++ h = (struct elf_link_hash_entry *) h->root.u.i.link;
++
++ htab = avr32_elf_hash_table(info);
++ havr = (struct elf_avr32_link_hash_entry *)h;
++
++ got = h->got.glist;
++
++ /* If got is NULL, the symbol is never referenced through the GOT */
++ if (got && got->refcount > 0)
++ {
++ insert_got_entry(htab, got);
++
++ /* Shared libraries need relocs for all GOT entries unless the
++ symbol is forced local or -Bsymbolic is used. Others need
++ relocs for everything that is not guaranteed to be defined in
++ a regular object. */
++ if ((info->shared
++ && !info->symbolic
++ && h->dynindx != -1)
++ || (htab->root.dynamic_sections_created
++ && h->def_dynamic
++ && !h->def_regular))
++ htab->srelgot->size += sizeof(Elf32_External_Rela);
++ }
++
++ if (havr->possibly_dynamic_relocs
++ && (info->shared
++ || (elf_hash_table(info)->dynamic_sections_created
++ && h->def_dynamic
++ && !h->def_regular)))
++ {
++ pr_debug("Allocating %d dynamic reloc against symbol %s...\n",
++ havr->possibly_dynamic_relocs, h->root.root.string);
++ htab->srelgot->size += (havr->possibly_dynamic_relocs
++ * sizeof(Elf32_External_Rela));
++ }
++
++ return TRUE;
++}
++
++/* (4) Calculate the sizes of the linker-generated sections and
++ allocate memory for them. */
++
++static bfd_boolean
++avr32_elf_size_dynamic_sections (bfd *output_bfd,
++ struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ bfd *dynobj;
++ asection *s;
++ bfd *ibfd;
++ bfd_boolean relocs;
++
++ pr_debug("(4) size dynamic sections\n");
++
++ htab = avr32_elf_hash_table(info);
++ dynobj = htab->root.dynobj;
++ BFD_ASSERT(dynobj != NULL);
++
++ if (htab->root.dynamic_sections_created)
++ {
++ /* Initialize the contents of the .interp section to the name of
++ the dynamic loader */
++ if (info->executable)
++ {
++ s = bfd_get_section_by_name(dynobj, ".interp");
++ BFD_ASSERT(s != NULL);
++ s->size = sizeof(ELF_DYNAMIC_INTERPRETER);
++ s->contents = (unsigned char *)ELF_DYNAMIC_INTERPRETER;
++ }
++ }
++
++ if (htab->nr_got_holes > 0)
++ {
++ /* Allocate holes for the pigeonhole sort algorithm */
++ pr_debug("Highest GOT refcount: %d\n", htab->nr_got_holes);
++
++ /* Limit the memory usage by clipping the number of pigeonholes
++ * at a predefined maximum. All entries with a higher refcount
++ * will end up in the last pigeonhole. */
++ if (htab->nr_got_holes >= MAX_NR_GOT_HOLES)
++ {
++ htab->nr_got_holes = MAX_NR_GOT_HOLES - 1;
++
++ pr_debug("Limiting maximum number of GOT pigeonholes to %u\n",
++ htab->nr_got_holes);
++ }
++ htab->got_hole = bfd_zalloc(output_bfd,
++ sizeof(struct got_entry *)
++ * (htab->nr_got_holes + 1));
++ if (!htab->got_hole)
++ return FALSE;
++
++ /* Set up .got offsets for local syms. */
++ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
++ {
++ struct got_entry **local_got;
++ struct got_entry **end_local_got;
++ Elf_Internal_Shdr *symtab_hdr;
++ bfd_size_type locsymcount;
++
++ pr_debug(" (4a) processing file %s...\n", ibfd->filename);
++
++ BFD_ASSERT(bfd_get_flavour(ibfd) == bfd_target_elf_flavour);
++
++ local_got = elf_local_got_ents(ibfd);
++ if (!local_got)
++ continue;
++
++ symtab_hdr = &elf_tdata(ibfd)->symtab_hdr;
++ locsymcount = symtab_hdr->sh_info;
++ end_local_got = local_got + locsymcount;
++
++ for (; local_got < end_local_got; ++local_got)
++ insert_got_entry(htab, *local_got);
++ }
++ }
++
++ /* Allocate global sym .got entries and space for global sym
++ dynamic relocs */
++ elf_link_hash_traverse(&htab->root, allocate_dynrelocs, info);
++
++ /* Now that we have sorted the GOT entries, we are ready to
++ assign offsets and determine the initial size of the GOT. */
++ if (htab->sgot)
++ assign_got_offsets(htab);
++
++ /* Allocate space for local sym dynamic relocs */
++ BFD_ASSERT(htab->local_dynamic_relocs == 0 || info->shared);
++ if (htab->local_dynamic_relocs)
++ htab->srelgot->size += (htab->local_dynamic_relocs
++ * sizeof(Elf32_External_Rela));
++
++ /* We now have determined the sizes of the various dynamic
++ sections. Allocate memory for them. */
++ relocs = FALSE;
++ for (s = dynobj->sections; s; s = s->next)
++ {
++ if ((s->flags & SEC_LINKER_CREATED) == 0)
++ continue;
++
++ if (s == htab->sgot
++ || s == htab->sstub)
++ {
++ /* Strip this section if we don't need it */
++ }
++ else if (strncmp (bfd_get_section_name(dynobj, s), ".rela", 5) == 0)
++ {
++ if (s->size != 0)
++ relocs = TRUE;
++
++ s->reloc_count = 0;
++ }
++ else
++ {
++ /* It's not one of our sections */
++ continue;
++ }
++
++ if (s->size == 0)
++ {
++ /* Strip unneeded sections */
++ pr_debug("Stripping section %s from output...\n", s->name);
++ /* deleted function in 2.17
++ _bfd_strip_section_from_output(info, s);
++ */
++ continue;
++ }
++
++ s->contents = bfd_zalloc(dynobj, s->size);
++ if (s->contents == NULL)
++ return FALSE;
++ }
++
++ if (htab->root.dynamic_sections_created)
++ {
++ /* Add some entries to the .dynamic section. We fill in the
++ values later, in sh_elf_finish_dynamic_sections, but we
++ must add the entries now so that we get the correct size for
++ the .dynamic section. The DT_DEBUG entry is filled in by the
++ dynamic linker and used by the debugger. */
++#define add_dynamic_entry(TAG, VAL) _bfd_elf_add_dynamic_entry(info, TAG, VAL)
++
++ if (!add_dynamic_entry(DT_PLTGOT, 0))
++ return FALSE;
++ if (!add_dynamic_entry(DT_AVR32_GOTSZ, 0))
++ return FALSE;
++
++ if (info->executable)
++ {
++ if (!add_dynamic_entry(DT_DEBUG, 0))
++ return FALSE;
++ }
++ if (relocs)
++ {
++ if (!add_dynamic_entry(DT_RELA, 0)
++ || !add_dynamic_entry(DT_RELASZ, 0)
++ || !add_dynamic_entry(DT_RELAENT,
++ sizeof(Elf32_External_Rela)))
++ return FALSE;
++ }
++ }
++#undef add_dynamic_entry
++
++ return TRUE;
++}
++
++
++/* Access to internal relocations, section contents and symbols.
++ (stolen from the xtensa port) */
++
++static Elf_Internal_Rela *
++retrieve_internal_relocs (bfd *abfd, asection *sec, bfd_boolean keep_memory);
++static void
++pin_internal_relocs (asection *sec, Elf_Internal_Rela *internal_relocs);
++static void
++release_internal_relocs (asection *sec, Elf_Internal_Rela *internal_relocs);
++static bfd_byte *
++retrieve_contents (bfd *abfd, asection *sec, bfd_boolean keep_memory);
++/*
++static void
++pin_contents (asection *sec, bfd_byte *contents);
++*/
++static void
++release_contents (asection *sec, bfd_byte *contents);
++static Elf_Internal_Sym *
++retrieve_local_syms (bfd *input_bfd, bfd_boolean keep_memory);
++/*
++static void
++pin_local_syms (bfd *input_bfd, Elf_Internal_Sym *isymbuf);
++*/
++static void
++release_local_syms (bfd *input_bfd, Elf_Internal_Sym *isymbuf);
++
++/* During relaxation, we need to modify relocations, section contents,
++ and symbol definitions, and we need to keep the original values from
++ being reloaded from the input files, i.e., we need to "pin" the
++ modified values in memory. We also want to continue to observe the
++ setting of the "keep-memory" flag. The following functions wrap the
++ standard BFD functions to take care of this for us. */
++
++static Elf_Internal_Rela *
++retrieve_internal_relocs (bfd *abfd, asection *sec, bfd_boolean keep_memory)
++{
++ /* _bfd_elf_link_read_relocs knows about caching, so no need for us
++ to be clever here. */
++ return _bfd_elf_link_read_relocs(abfd, sec, NULL, NULL, keep_memory);
++}
++
++static void
++pin_internal_relocs (asection *sec, Elf_Internal_Rela *internal_relocs)
++{
++ elf_section_data (sec)->relocs = internal_relocs;
++}
++
++static void
++release_internal_relocs (asection *sec, Elf_Internal_Rela *internal_relocs)
++{
++ if (internal_relocs
++ && elf_section_data (sec)->relocs != internal_relocs)
++ free (internal_relocs);
++}
++
++static bfd_byte *
++retrieve_contents (bfd *abfd, asection *sec, bfd_boolean keep_memory)
++{
++ bfd_byte *contents;
++ bfd_size_type sec_size;
++
++ sec_size = bfd_get_section_limit (abfd, sec);
++ contents = elf_section_data (sec)->this_hdr.contents;
++
++ if (contents == NULL && sec_size != 0)
++ {
++ if (!bfd_malloc_and_get_section (abfd, sec, &contents))
++ {
++ if (contents)
++ free (contents);
++ return NULL;
++ }
++ if (keep_memory)
++ elf_section_data (sec)->this_hdr.contents = contents;
++ }
++ return contents;
++}
++
++/*
++static void
++pin_contents (asection *sec, bfd_byte *contents)
++{
++ elf_section_data (sec)->this_hdr.contents = contents;
++}
++*/
++static void
++release_contents (asection *sec, bfd_byte *contents)
++{
++ if (contents && elf_section_data (sec)->this_hdr.contents != contents)
++ free (contents);
++}
++
++static Elf_Internal_Sym *
++retrieve_local_syms (bfd *input_bfd, bfd_boolean keep_memory)
++{
++ Elf_Internal_Shdr *symtab_hdr;
++ Elf_Internal_Sym *isymbuf;
++ size_t locsymcount;
++
++ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
++ locsymcount = symtab_hdr->sh_info;
++
++ isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
++ if (isymbuf == NULL && locsymcount != 0)
++ {
++ isymbuf = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, locsymcount, 0,
++ NULL, NULL, NULL);
++ if (isymbuf && keep_memory)
++ symtab_hdr->contents = (unsigned char *) isymbuf;
++ }
++
++ return isymbuf;
++}
++
++/*
++static void
++pin_local_syms (bfd *input_bfd, Elf_Internal_Sym *isymbuf)
++{
++ elf_tdata (input_bfd)->symtab_hdr.contents = (unsigned char *)isymbuf;
++}
++
++*/
++static void
++release_local_syms (bfd *input_bfd, Elf_Internal_Sym *isymbuf)
++{
++ if (isymbuf && (elf_tdata (input_bfd)->symtab_hdr.contents
++ != (unsigned char *)isymbuf))
++ free (isymbuf);
++}
++
++ /* Data structures used during relaxation. */
++
++enum relax_state_id {
++ RS_ERROR = -1,
++ RS_NONE = 0,
++ RS_ALIGN,
++ RS_CPENT,
++ RS_PIC_CALL,
++ RS_PIC_MCALL,
++ RS_PIC_RCALL2,
++ RS_PIC_RCALL1,
++ RS_PIC_LDA,
++ RS_PIC_LDW4,
++ RS_PIC_LDW3,
++ RS_PIC_SUB5,
++ RS_NOPIC_MCALL,
++ RS_NOPIC_RCALL2,
++ RS_NOPIC_RCALL1,
++ RS_NOPIC_LDW4,
++ RS_NOPIC_LDDPC,
++ RS_NOPIC_SUB5,
++ RS_NOPIC_MOV2,
++ RS_NOPIC_MOV1,
++ RS_RCALL2,
++ RS_RCALL1,
++ RS_BRC2,
++ RS_BRC1,
++ RS_BRAL,
++ RS_RJMP,
++ RS_MAX,
++};
++
++enum reference_type {
++ REF_ABSOLUTE,
++ REF_PCREL,
++ REF_CPOOL,
++ REF_GOT,
++};
++
++struct relax_state
++{
++ const char *name;
++ enum relax_state_id id;
++ enum relax_state_id direct;
++ enum relax_state_id next;
++ enum relax_state_id prev;
++
++ enum reference_type reftype;
++
++ unsigned int r_type;
++
++ bfd_vma opcode;
++ bfd_vma opcode_mask;
++
++ bfd_signed_vma range_min;
++ bfd_signed_vma range_max;
++
++ bfd_size_type size;
++};
++
++/*
++ * This is for relocs that
++ * a) has an addend or is of type R_AVR32_DIFF32, and
++ * b) references a different section than it's in, and
++ * c) references a section that is relaxable
++ *
++ * as well as relocs that references the constant pool, in which case
++ * the add_frag member points to the frag containing the constant pool
++ * entry.
++ *
++ * Such relocs must be fixed up whenever we delete any code. Sections
++ * that don't have any relocs with all of the above properties don't
++ * have any additional reloc data, but sections that do will have
++ * additional data for all its relocs.
++ */
++struct avr32_reloc_data
++{
++ struct fragment *add_frag;
++ struct fragment *sub_frag;
++};
++
++/*
++ * A 'fragment' is a relaxable entity, that is, code may be added or
++ * deleted at the end of a fragment. When this happens, all subsequent
++ * fragments in the list will have their offsets updated.
++ */
++struct fragment
++{
++ enum relax_state_id state;
++ enum relax_state_id initial_state;
++
++ Elf_Internal_Rela *rela;
++ bfd_size_type size;
++ bfd_vma offset;
++ int size_adjust;
++ int offset_adjust;
++ bfd_boolean has_grown;
++
++ /* Only used by constant pool entries. When this drops to zero, the
++ frag is discarded (i.e. size_adjust is set to -4.) */
++ int refcount;
++};
++
++struct avr32_relax_data
++{
++ unsigned int frag_count;
++ struct fragment *frag;
++ struct avr32_reloc_data *reloc_data;
++
++ /* TRUE if this section has one or more relaxable relocations */
++ bfd_boolean is_relaxable;
++ unsigned int iteration;
++};
++
++struct avr32_section_data
++{
++ struct bfd_elf_section_data elf;
++ struct avr32_relax_data relax_data;
++};
++
++ /* Relax state definitions */
++
++#define PIC_MOV2_OPCODE 0xe0600000
++#define PIC_MOV2_MASK 0xe1e00000
++#define PIC_MOV2_RANGE_MIN (-1048576 * 4)
++#define PIC_MOV2_RANGE_MAX (1048575 * 4)
++#define PIC_MCALL_OPCODE 0xf0160000
++#define PIC_MCALL_MASK 0xffff0000
++#define PIC_MCALL_RANGE_MIN (-131072)
++#define PIC_MCALL_RANGE_MAX (131068)
++#define RCALL2_OPCODE 0xe0a00000
++#define RCALL2_MASK 0xe1ef0000
++#define RCALL2_RANGE_MIN (-2097152)
++#define RCALL2_RANGE_MAX (2097150)
++#define RCALL1_OPCODE 0xc00c0000
++#define RCALL1_MASK 0xf00c0000
++#define RCALL1_RANGE_MIN (-1024)
++#define RCALL1_RANGE_MAX (1022)
++#define PIC_LDW4_OPCODE 0xecf00000
++#define PIC_LDW4_MASK 0xfff00000
++#define PIC_LDW4_RANGE_MIN (-32768)
++#define PIC_LDW4_RANGE_MAX (32767)
++#define PIC_LDW3_OPCODE 0x6c000000
++#define PIC_LDW3_MASK 0xfe000000
++#define PIC_LDW3_RANGE_MIN (0)
++#define PIC_LDW3_RANGE_MAX (124)
++#define SUB5_PC_OPCODE 0xfec00000
++#define SUB5_PC_MASK 0xfff00000
++#define SUB5_PC_RANGE_MIN (-32768)
++#define SUB5_PC_RANGE_MAX (32767)
++#define NOPIC_MCALL_OPCODE 0xf01f0000
++#define NOPIC_MCALL_MASK 0xffff0000
++#define NOPIC_MCALL_RANGE_MIN PIC_MCALL_RANGE_MIN
++#define NOPIC_MCALL_RANGE_MAX PIC_MCALL_RANGE_MAX
++#define NOPIC_LDW4_OPCODE 0xfef00000
++#define NOPIC_LDW4_MASK 0xfff00000
++#define NOPIC_LDW4_RANGE_MIN PIC_LDW4_RANGE_MIN
++#define NOPIC_LDW4_RANGE_MAX PIC_LDW4_RANGE_MAX
++#define LDDPC_OPCODE 0x48000000
++#define LDDPC_MASK 0xf8000000
++#define LDDPC_RANGE_MIN 0
++#define LDDPC_RANGE_MAX 508
++
++#define NOPIC_MOV2_OPCODE 0xe0600000
++#define NOPIC_MOV2_MASK 0xe1e00000
++#define NOPIC_MOV2_RANGE_MIN (-1048576)
++#define NOPIC_MOV2_RANGE_MAX (1048575)
++#define NOPIC_MOV1_OPCODE 0x30000000
++#define NOPIC_MOV1_MASK 0xf0000000
++#define NOPIC_MOV1_RANGE_MIN (-128)
++#define NOPIC_MOV1_RANGE_MAX (127)
++
++/* Only brc2 variants with cond[3] == 0 is considered, since the
++ others are not relaxable. bral is a special case and is handled
++ separately. */
++#define BRC2_OPCODE 0xe0800000
++#define BRC2_MASK 0xe1e80000
++#define BRC2_RANGE_MIN (-2097152)
++#define BRC2_RANGE_MAX (2097150)
++#define BRC1_OPCODE 0xc0000000
++#define BRC1_MASK 0xf0080000
++#define BRC1_RANGE_MIN (-256)
++#define BRC1_RANGE_MAX (254)
++#define BRAL_OPCODE 0xe08f0000
++#define BRAL_MASK 0xe1ef0000
++#define BRAL_RANGE_MIN BRC2_RANGE_MIN
++#define BRAL_RANGE_MAX BRC2_RANGE_MAX
++#define RJMP_OPCODE 0xc0080000
++#define RJMP_MASK 0xf00c0000
++#define RJMP_RANGE_MIN (-1024)
++#define RJMP_RANGE_MAX (1022)
++
++/* Define a relax state using the GOT */
++#define RG(id, dir, next, prev, r_type, opc, size) \
++ { "RS_"#id, RS_##id, RS_##dir, RS_##next, RS_##prev, REF_GOT, \
++ R_AVR32_##r_type, opc##_OPCODE, opc##_MASK, \
++ opc##_RANGE_MIN, opc##_RANGE_MAX, size }
++/* Define a relax state using the Constant Pool */
++#define RC(id, dir, next, prev, r_type, opc, size) \
++ { "RS_"#id, RS_##id, RS_##dir, RS_##next, RS_##prev, REF_CPOOL, \
++ R_AVR32_##r_type, opc##_OPCODE, opc##_MASK, \
++ opc##_RANGE_MIN, opc##_RANGE_MAX, size }
++
++/* Define a relax state using pc-relative direct reference */
++#define RP(id, dir, next, prev, r_type, opc, size) \
++ { "RS_"#id, RS_##id, RS_##dir, RS_##next, RS_##prev, REF_PCREL, \
++ R_AVR32_##r_type, opc##_OPCODE, opc##_MASK, \
++ opc##_RANGE_MIN, opc##_RANGE_MAX, size }
++
++/* Define a relax state using non-pc-relative direct reference */
++#define RD(id, dir, next, prev, r_type, opc, size) \
++ { "RS_"#id, RS_##id, RS_##dir, RS_##next, RS_##prev, REF_ABSOLUTE, \
++ R_AVR32_##r_type, opc##_OPCODE, opc##_MASK, \
++ opc##_RANGE_MIN, opc##_RANGE_MAX, size }
++
++/* Define a relax state that will be handled specially */
++#define RS(id, r_type, size) \
++ { "RS_"#id, RS_##id, RS_NONE, RS_NONE, RS_NONE, REF_ABSOLUTE, \
++ R_AVR32_##r_type, 0, 0, 0, 0, size }
++
++const struct relax_state relax_state[RS_MAX] = {
++ RS(NONE, NONE, 0),
++ RS(ALIGN, ALIGN, 0),
++ RS(CPENT, 32_CPENT, 4),
++
++ RG(PIC_CALL, PIC_RCALL1, PIC_MCALL, NONE, GOTCALL, PIC_MOV2, 10),
++ RG(PIC_MCALL, PIC_RCALL1, NONE, PIC_CALL, GOT18SW, PIC_MCALL, 4),
++ RP(PIC_RCALL2, NONE, PIC_RCALL1, PIC_MCALL, 22H_PCREL, RCALL2, 4),
++ RP(PIC_RCALL1, NONE, NONE, PIC_RCALL2, 11H_PCREL, RCALL1, 2),
++
++ RG(PIC_LDA, PIC_SUB5, PIC_LDW4, NONE, LDA_GOT, PIC_MOV2, 8),
++ RG(PIC_LDW4, PIC_SUB5, PIC_LDW3, PIC_LDA, GOT16S, PIC_LDW4, 4),
++ RG(PIC_LDW3, PIC_SUB5, NONE, PIC_LDW4, GOT7UW, PIC_LDW3, 2),
++ RP(PIC_SUB5, NONE, NONE, PIC_LDW3, 16N_PCREL, SUB5_PC, 4),
++
++ RC(NOPIC_MCALL, NOPIC_RCALL1, NONE, NONE, CPCALL, NOPIC_MCALL, 4),
++ RP(NOPIC_RCALL2, NONE, NOPIC_RCALL1, NOPIC_MCALL, 22H_PCREL, RCALL2, 4),
++ RP(NOPIC_RCALL1, NONE, NONE, NOPIC_RCALL2, 11H_PCREL, RCALL1, 2),
++
++ RC(NOPIC_LDW4, NOPIC_MOV1, NOPIC_LDDPC, NONE, 16_CP, NOPIC_LDW4, 4),
++ RC(NOPIC_LDDPC, NOPIC_MOV1, NONE, NOPIC_LDW4, 9W_CP, LDDPC, 2),
++ RP(NOPIC_SUB5, NOPIC_MOV1, NONE, NOPIC_LDDPC, 16N_PCREL, SUB5_PC, 4),
++ RD(NOPIC_MOV2, NONE, NOPIC_MOV1, NOPIC_SUB5, 21S, NOPIC_MOV2, 4),
++ RD(NOPIC_MOV1, NONE, NONE, NOPIC_MOV2, 8S, NOPIC_MOV1, 2),
++
++ RP(RCALL2, NONE, RCALL1, NONE, 22H_PCREL, RCALL2, 4),
++ RP(RCALL1, NONE, NONE, RCALL2, 11H_PCREL, RCALL1, 2),
++ RP(BRC2, NONE, BRC1, NONE, 22H_PCREL, BRC2, 4),
++ RP(BRC1, NONE, NONE, BRC2, 9H_PCREL, BRC1, 2),
++ RP(BRAL, NONE, RJMP, NONE, 22H_PCREL, BRAL, 4),
++ RP(RJMP, NONE, NONE, BRAL, 11H_PCREL, RJMP, 2),
++};
++
++static bfd_boolean
++avr32_elf_new_section_hook(bfd *abfd, asection *sec)
++{
++ struct avr32_section_data *sdata;
++
++ sdata = bfd_zalloc(abfd, sizeof(struct avr32_section_data));
++ if (!sdata)
++ return FALSE;
++
++ sec->used_by_bfd = sdata;
++ return _bfd_elf_new_section_hook(abfd, sec);
++}
++
++static struct avr32_relax_data *
++avr32_relax_data(asection *sec)
++{
++ struct avr32_section_data *sdata;
++
++ BFD_ASSERT(sec->used_by_bfd);
++
++ sdata = (struct avr32_section_data *)elf_section_data(sec);
++ return &sdata->relax_data;
++}
++
++ /* Link-time relaxation */
++
++static bfd_boolean
++avr32_elf_relax_section(bfd *abfd, asection *sec,
++ struct bfd_link_info *info, bfd_boolean *again);
++
++enum relax_pass_id {
++ RELAX_PASS_SIZE_FRAGS,
++ RELAX_PASS_MOVE_DATA,
++};
++
++/* Stolen from the xtensa port */
++static int
++internal_reloc_compare (const void *ap, const void *bp)
++{
++ const Elf_Internal_Rela *a = (const Elf_Internal_Rela *) ap;
++ const Elf_Internal_Rela *b = (const Elf_Internal_Rela *) bp;
++
++ if (a->r_offset != b->r_offset)
++ return (a->r_offset - b->r_offset);
++
++ /* We don't need to sort on these criteria for correctness,
++ but enforcing a more strict ordering prevents unstable qsort
++ from behaving differently with different implementations.
++ Without the code below we get correct but different results
++ on Solaris 2.7 and 2.8. We would like to always produce the
++ same results no matter the host. */
++
++ if (a->r_info != b->r_info)
++ return (a->r_info - b->r_info);
++
++ return (a->r_addend - b->r_addend);
++}
++
++static enum relax_state_id
++get_pcrel22_relax_state(bfd *abfd, asection *sec, struct bfd_link_info *info,
++ const Elf_Internal_Rela *rela)
++{
++ bfd_byte *contents;
++ bfd_vma insn;
++ enum relax_state_id rs = RS_NONE;
++
++ contents = retrieve_contents(abfd, sec, info->keep_memory);
++ if (!contents)
++ return RS_ERROR;
++
++ insn = bfd_get_32(abfd, contents + rela->r_offset);
++ if ((insn & RCALL2_MASK) == RCALL2_OPCODE)
++ rs = RS_RCALL2;
++ else if ((insn & BRAL_MASK) == BRAL_OPCODE)
++ /* Optimizing bral -> rjmp gets us into all kinds of
++ trouble with jump tables. Better not do it. */
++ rs = RS_NONE;
++ else if ((insn & BRC2_MASK) == BRC2_OPCODE)
++ rs = RS_BRC2;
++
++ release_contents(sec, contents);
++
++ return rs;
++}
++
++static enum relax_state_id
++get_initial_relax_state(bfd *abfd, asection *sec, struct bfd_link_info *info,
++ const Elf_Internal_Rela *rela)
++{
++ switch (ELF_R_TYPE(rela->r_info))
++ {
++ case R_AVR32_GOTCALL:
++ return RS_PIC_CALL;
++ case R_AVR32_GOT18SW:
++ return RS_PIC_MCALL;
++ case R_AVR32_LDA_GOT:
++ return RS_PIC_LDA;
++ case R_AVR32_GOT16S:
++ return RS_PIC_LDW4;
++ case R_AVR32_CPCALL:
++ return RS_NOPIC_MCALL;
++ case R_AVR32_16_CP:
++ return RS_NOPIC_LDW4;
++ case R_AVR32_9W_CP:
++ return RS_NOPIC_LDDPC;
++ case R_AVR32_ALIGN:
++ return RS_ALIGN;
++ case R_AVR32_32_CPENT:
++ return RS_CPENT;
++ case R_AVR32_22H_PCREL:
++ return get_pcrel22_relax_state(abfd, sec, info, rela);
++ case R_AVR32_9H_PCREL:
++ return RS_BRC1;
++ default:
++ return RS_NONE;
++ }
++}
++
++static bfd_boolean
++reloc_is_cpool_ref(const Elf_Internal_Rela *rela)
++{
++ switch (ELF_R_TYPE(rela->r_info))
++ {
++ case R_AVR32_CPCALL:
++ case R_AVR32_16_CP:
++ case R_AVR32_9W_CP:
++ return TRUE;
++ default:
++ return FALSE;
++ }
++}
++
++static struct fragment *
++new_frag(bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
++ struct avr32_relax_data *rd, enum relax_state_id state,
++ Elf_Internal_Rela *rela)
++{
++ struct fragment *frag;
++ bfd_size_type r_size;
++ bfd_vma r_offset;
++ unsigned int i = rd->frag_count;
++
++ BFD_ASSERT(state >= RS_NONE && state < RS_MAX);
++
++ rd->frag_count++;
++ frag = bfd_realloc(rd->frag, sizeof(struct fragment) * rd->frag_count);
++ if (!frag)
++ return NULL;
++ rd->frag = frag;
++
++ frag += i;
++ memset(frag, 0, sizeof(struct fragment));
++
++ if (state == RS_ALIGN)
++ r_size = (((rela->r_offset + (1 << rela->r_addend) - 1)
++ & ~((1 << rela->r_addend) - 1)) - rela->r_offset);
++ else
++ r_size = relax_state[state].size;
++
++ if (rela)
++ r_offset = rela->r_offset;
++ else
++ r_offset = sec->size;
++
++ if (i == 0)
++ {
++ frag->offset = 0;
++ frag->size = r_offset + r_size;
++ }
++ else
++ {
++ frag->offset = rd->frag[i - 1].offset + rd->frag[i - 1].size;
++ frag->size = r_offset + r_size - frag->offset;
++ }
++
++ if (state != RS_CPENT)
++ /* Make sure we don't discard this frag */
++ frag->refcount = 1;
++
++ frag->initial_state = frag->state = state;
++ frag->rela = rela;
++
++ return frag;
++}
++
++static struct fragment *
++find_frag(asection *sec, bfd_vma offset)
++{
++ struct fragment *first, *last;
++ struct avr32_relax_data *rd = avr32_relax_data(sec);
++
++ if (rd->frag_count == 0)
++ return NULL;
++
++ first = &rd->frag[0];
++ last = &rd->frag[rd->frag_count - 1];
++
++ /* This may be a reloc referencing the end of a section. The last
++ frag will never have a reloc associated with it, so its size will
++ never change, thus the offset adjustment of the last frag will
++ always be the same as the offset adjustment of the end of the
++ section. */
++ if (offset == sec->size)
++ {
++ BFD_ASSERT(last->offset + last->size == sec->size);
++ BFD_ASSERT(!last->rela);
++ return last;
++ }
++
++ while (first <= last)
++ {
++ struct fragment *mid;
++
++ mid = (last - first) / 2 + first;
++ if ((mid->offset + mid->size) <= offset)
++ first = mid + 1;
++ else if (mid->offset > offset)
++ last = mid - 1;
++ else
++ return mid;
++ }
++
++ return NULL;
++}
++
++/* Look through all relocs in a section and determine if any relocs
++ may be affected by relaxation in other sections. If so, allocate
++ an array of additional relocation data which links the affected
++ relocations to the frag(s) where the relaxation may occur.
++
++ This function also links cpool references to cpool entries and
++ increments the refcount of the latter when this happens. */
++
++static bfd_boolean
++allocate_reloc_data(bfd *abfd, asection *sec, Elf_Internal_Rela *relocs,
++ struct bfd_link_info *info)
++{
++ Elf_Internal_Shdr *symtab_hdr;
++ Elf_Internal_Sym *isymbuf = NULL;
++ struct avr32_relax_data *rd;
++ unsigned int i;
++ bfd_boolean ret = FALSE;
++
++ symtab_hdr = &elf_tdata(abfd)->symtab_hdr;
++ rd = avr32_relax_data(sec);
++
++ RDBG("%s<%s>: allocate_reloc_data\n", abfd->filename, sec->name);
++
++ for (i = 0; i < sec->reloc_count; i++)
++ {
++ Elf_Internal_Rela *rel = &relocs[i];
++ asection *sym_sec;
++ unsigned long r_symndx;
++ bfd_vma sym_value;
++
++ if (!rel->r_addend && ELF_R_TYPE(rel->r_info) != R_AVR32_DIFF32
++ && !reloc_is_cpool_ref(rel))
++ continue;
++
++ r_symndx = ELF_R_SYM(rel->r_info);
++
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ Elf_Internal_Sym *isym;
++
++ if (!isymbuf)
++ isymbuf = retrieve_local_syms(abfd, info->keep_memory);
++ if (!isymbuf)
++ return FALSE;
++
++ isym = &isymbuf[r_symndx];
++ sym_sec = bfd_section_from_elf_index(abfd, isym->st_shndx);
++ sym_value = isym->st_value;
++ }
++ else
++ {
++ struct elf_link_hash_entry *h;
++
++ h = elf_sym_hashes(abfd)[r_symndx - symtab_hdr->sh_info];
++
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *)h->root.u.i.link;
++
++ if (h->root.type != bfd_link_hash_defined
++ && h->root.type != bfd_link_hash_defweak)
++ continue;
++
++ sym_sec = h->root.u.def.section;
++ sym_value = h->root.u.def.value;
++ }
++
++ if (sym_sec && avr32_relax_data(sym_sec)->is_relaxable)
++ {
++ bfd_size_type size;
++ struct fragment *frag;
++
++ if (!rd->reloc_data)
++ {
++ size = sizeof(struct avr32_reloc_data) * sec->reloc_count;
++ rd->reloc_data = bfd_zalloc(abfd, size);
++ if (!rd->reloc_data)
++ goto out;
++ }
++
++ RDBG("[%3d] 0x%04lx: target: 0x%lx + 0x%lx",
++ i, rel->r_offset, sym_value, rel->r_addend);
++
++ frag = find_frag(sym_sec, sym_value + rel->r_addend);
++ BFD_ASSERT(frag);
++ rd->reloc_data[i].add_frag = frag;
++
++ RDBG(" -> %s<%s>:%04lx\n", sym_sec->owner->filename, sym_sec->name,
++ frag->rela ? frag->rela->r_offset : sym_sec->size);
++
++ if (reloc_is_cpool_ref(rel))
++ {
++ BFD_ASSERT(ELF_R_TYPE(frag->rela->r_info) == R_AVR32_32_CPENT);
++ frag->refcount++;
++ }
++
++ if (ELF_R_TYPE(rel->r_info) == R_AVR32_DIFF32)
++ {
++ bfd_byte *contents;
++ bfd_signed_vma diff;
++
++ contents = retrieve_contents(abfd, sec, info->keep_memory);
++ if (!contents)
++ goto out;
++
++ diff = bfd_get_signed_32(abfd, contents + rel->r_offset);
++ frag = find_frag(sym_sec, sym_value + rel->r_addend + diff);
++ BFD_ASSERT(frag);
++ rd->reloc_data[i].sub_frag = frag;
++
++ release_contents(sec, contents);
++ }
++ }
++ }
++
++ ret = TRUE;
++
++ out:
++ release_local_syms(abfd, isymbuf);
++ return ret;
++}
++
++static bfd_boolean
++global_sym_set_frag(struct elf_avr32_link_hash_entry *havr,
++ struct bfd_link_info *info ATTRIBUTE_UNUSED)
++{
++ struct fragment *frag;
++ asection *sec;
++
++ if (havr->root.root.type != bfd_link_hash_defined
++ && havr->root.root.type != bfd_link_hash_defweak)
++ return TRUE;
++
++ sec = havr->root.root.u.def.section;
++ if (bfd_is_const_section(sec)
++ || !avr32_relax_data(sec)->is_relaxable)
++ return TRUE;
++
++ frag = find_frag(sec, havr->root.root.u.def.value);
++ if (!frag)
++ {
++ unsigned int i;
++ struct avr32_relax_data *rd = avr32_relax_data(sec);
++
++ RDBG("In %s: No frag for %s <%s+%lu> (limit %lu)\n",
++ sec->owner->filename, havr->root.root.root.string,
++ sec->name, havr->root.root.u.def.value, sec->size);
++ for (i = 0; i < rd->frag_count; i++)
++ RDBG(" %8lu - %8lu\n", rd->frag[i].offset,
++ rd->frag[i].offset + rd->frag[i].size);
++ }
++ BFD_ASSERT(frag);
++
++ havr->sym_frag = frag;
++ return TRUE;
++}
++
++static bfd_boolean
++analyze_relocations(struct bfd_link_info *info)
++{
++ bfd *abfd;
++ asection *sec;
++
++ /* Divide all relaxable sections into fragments */
++ for (abfd = info->input_bfds; abfd; abfd = abfd->link_next)
++ {
++ if (!(elf_elfheader(abfd)->e_flags & EF_AVR32_LINKRELAX))
++ {
++ if (!(*info->callbacks->warning)
++ (info, _("input is not relaxable"), NULL, abfd, NULL, 0))
++ return FALSE;
++ continue;
++ }
++
++ for (sec = abfd->sections; sec; sec = sec->next)
++ {
++ struct avr32_relax_data *rd;
++ struct fragment *frag;
++ Elf_Internal_Rela *relocs;
++ unsigned int i;
++ bfd_boolean ret = TRUE;
++
++ if (!(sec->flags & SEC_RELOC) || sec->reloc_count == 0)
++ continue;
++
++ rd = avr32_relax_data(sec);
++
++ relocs = retrieve_internal_relocs(abfd, sec, info->keep_memory);
++ if (!relocs)
++ return FALSE;
++
++ qsort(relocs, sec->reloc_count, sizeof(Elf_Internal_Rela),
++ internal_reloc_compare);
++
++ for (i = 0; i < sec->reloc_count; i++)
++ {
++ enum relax_state_id state;
++
++ ret = FALSE;
++ state = get_initial_relax_state(abfd, sec, info, &relocs[i]);
++ if (state == RS_ERROR)
++ break;
++
++ if (state)
++ {
++ frag = new_frag(abfd, sec, rd, state, &relocs[i]);
++ if (!frag)
++ break;
++
++ pin_internal_relocs(sec, relocs);
++ rd->is_relaxable = TRUE;
++ }
++
++ ret = TRUE;
++ }
++
++ release_internal_relocs(sec, relocs);
++ if (!ret)
++ return ret;
++
++ if (rd->is_relaxable)
++ {
++ frag = new_frag(abfd, sec, rd, RS_NONE, NULL);
++ if (!frag)
++ return FALSE;
++ }
++ }
++ }
++
++ /* Link each global symbol to the fragment where it's defined. */
++ elf_link_hash_traverse(elf_hash_table(info), global_sym_set_frag, info);
++
++ /* Do the same for local symbols. */
++ for (abfd = info->input_bfds; abfd; abfd = abfd->link_next)
++ {
++ Elf_Internal_Sym *isymbuf, *isym;
++ struct fragment **local_sym_frag;
++ unsigned int i, sym_count;
++
++ sym_count = elf_tdata(abfd)->symtab_hdr.sh_info;
++ if (sym_count == 0)
++ continue;
++
++ local_sym_frag = bfd_zalloc(abfd, sym_count * sizeof(struct fragment *));
++ if (!local_sym_frag)
++ return FALSE;
++ elf_tdata(abfd)->local_sym_frag = local_sym_frag;
++
++ isymbuf = retrieve_local_syms(abfd, info->keep_memory);
++ if (!isymbuf)
++ return FALSE;
++
++ for (i = 0; i < sym_count; i++)
++ {
++ struct avr32_relax_data *rd;
++ struct fragment *frag;
++ asection *sec;
++
++ isym = &isymbuf[i];
++
++ sec = bfd_section_from_elf_index(abfd, isym->st_shndx);
++ if (!sec)
++ continue;
++
++ rd = avr32_relax_data(sec);
++ if (!rd->is_relaxable)
++ continue;
++
++ frag = find_frag(sec, isym->st_value);
++ BFD_ASSERT(frag);
++
++ local_sym_frag[i] = frag;
++ }
++
++ release_local_syms(abfd, isymbuf);
++ }
++
++ /* And again for relocs with addends and constant pool references */
++ for (abfd = info->input_bfds; abfd; abfd = abfd->link_next)
++ for (sec = abfd->sections; sec; sec = sec->next)
++ {
++ Elf_Internal_Rela *relocs;
++ bfd_boolean ret;
++
++ if (!(sec->flags & SEC_RELOC) || sec->reloc_count == 0)
++ continue;
++
++ relocs = retrieve_internal_relocs(abfd, sec, info->keep_memory);
++ if (!relocs)
++ return FALSE;
++
++ ret = allocate_reloc_data(abfd, sec, relocs, info);
++
++ release_internal_relocs(sec, relocs);
++ if (ret == FALSE)
++ return ret;
++ }
++
++ return TRUE;
++}
++
++static bfd_boolean
++rs_is_good_enough(const struct relax_state *rs, struct fragment *frag,
++ bfd_vma symval, bfd_vma addr, struct got_entry *got,
++ struct avr32_reloc_data *ind_data,
++ bfd_signed_vma offset_adjust)
++{
++ bfd_signed_vma target = 0;
++
++ switch (rs->reftype)
++ {
++ case REF_ABSOLUTE:
++ target = symval;
++ break;
++ case REF_PCREL:
++ target = symval - addr;
++ break;
++ case REF_CPOOL:
++ /* cpool frags are always in the same section and always after
++ all frags referring to it. So it's always correct to add in
++ offset_adjust here. */
++ target = (ind_data->add_frag->offset + ind_data->add_frag->offset_adjust
++ + offset_adjust - frag->offset - frag->offset_adjust);
++ break;
++ case REF_GOT:
++ target = got->offset;
++ break;
++ default:
++ abort();
++ }
++
++ if (target >= rs->range_min && target <= rs->range_max)
++ return TRUE;
++ else
++ return FALSE;
++}
++
++static bfd_boolean
++avr32_size_frags(bfd *abfd, asection *sec, struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ struct avr32_relax_data *rd;
++ Elf_Internal_Shdr *symtab_hdr;
++ Elf_Internal_Rela *relocs = NULL;
++ Elf_Internal_Sym *isymbuf = NULL;
++ struct got_entry **local_got_ents;
++ struct fragment **local_sym_frag;
++ bfd_boolean ret = FALSE;
++ bfd_signed_vma delta = 0;
++ unsigned int i;
++
++ htab = avr32_elf_hash_table(info);
++ rd = avr32_relax_data(sec);
++
++ if (sec == htab->sgot)
++ {
++ RDBG("Relaxing GOT section (vma: 0x%lx)\n",
++ sec->output_section->vma + sec->output_offset);
++ if (assign_got_offsets(htab))
++ htab->repeat_pass = TRUE;
++ return TRUE;
++ }
++
++ if (!rd->is_relaxable)
++ return TRUE;
++
++ if (!sec->rawsize)
++ sec->rawsize = sec->size;
++
++ symtab_hdr = &elf_tdata(abfd)->symtab_hdr;
++ relocs = retrieve_internal_relocs(abfd, sec, info->keep_memory);
++ if (!relocs)
++ goto out;
++
++ isymbuf = retrieve_local_syms(abfd, info->keep_memory);
++ if (!isymbuf)
++ goto out;
++
++ local_got_ents = elf_local_got_ents(abfd);
++ local_sym_frag = elf_tdata(abfd)->local_sym_frag;
++
++ RDBG("size_frags: %s<%s>\n vma: 0x%08lx, size: 0x%08lx\n",
++ abfd->filename, sec->name,
++ sec->output_section->vma + sec->output_offset, sec->size);
++
++ for (i = 0; i < rd->frag_count; i++)
++ {
++ struct fragment *frag = &rd->frag[i];
++ struct avr32_reloc_data *r_data = NULL, *ind_data = NULL;
++ const struct relax_state *state, *next_state;
++ struct fragment *target_frag = NULL;
++ asection *sym_sec = NULL;
++ Elf_Internal_Rela *rela;
++ struct got_entry *got;
++ bfd_vma symval, r_offset, addend, addr;
++ bfd_signed_vma size_adjust = 0, distance;
++ unsigned long r_symndx;
++ bfd_boolean defined = TRUE, dynamic = FALSE;
++ unsigned char sym_type;
++
++ frag->offset_adjust += delta;
++ state = next_state = &relax_state[frag->state];
++ rela = frag->rela;
++
++ BFD_ASSERT(state->id == frag->state);
++
++ RDBG(" 0x%04lx%c%d: %s [size %ld]", rela ? rela->r_offset : sec->rawsize,
++ (frag->offset_adjust < 0)?'-':'+',
++ abs(frag->offset_adjust), state->name, state->size);
++
++ if (!rela)
++ {
++ RDBG(": no reloc, ignoring\n");
++ continue;
++ }
++
++ BFD_ASSERT((unsigned int)(rela - relocs) < sec->reloc_count);
++ BFD_ASSERT(state != RS_NONE);
++
++ r_offset = rela->r_offset + frag->offset_adjust;
++ addr = sec->output_section->vma + sec->output_offset + r_offset;
++
++ switch (frag->state)
++ {
++ case RS_ALIGN:
++ size_adjust = ((addr + (1 << rela->r_addend) - 1)
++ & ~((1 << rela->r_addend) - 1));
++ size_adjust -= (sec->output_section->vma + sec->output_offset
++ + frag->offset + frag->offset_adjust
++ + frag->size + frag->size_adjust);
++
++ RDBG(": adjusting size %lu -> %lu\n", frag->size + frag->size_adjust,
++ frag->size + frag->size_adjust + size_adjust);
++ break;
++
++ case RS_CPENT:
++ if (frag->refcount == 0 && frag->size_adjust == 0)
++ {
++ RDBG(": discarding frag\n");
++ size_adjust = -4;
++ }
++ else if (frag->refcount > 0 && frag->size_adjust < 0)
++ {
++ RDBG(": un-discarding frag\n");
++ size_adjust = 4;
++ }
++ break;
++
++ default:
++ if (rd->reloc_data)
++ r_data = &rd->reloc_data[frag->rela - relocs];
++
++ /* If this is a cpool reference, we want the symbol that the
++ cpool entry refers to, not the symbol for the cpool entry
++ itself, as we already know what frag it's in. */
++ if (relax_state[frag->initial_state].reftype == REF_CPOOL)
++ {
++ Elf_Internal_Rela *irela = r_data->add_frag->rela;
++
++ r_symndx = ELF_R_SYM(irela->r_info);
++ addend = irela->r_addend;
++
++ /* The constant pool must be in the same section as the
++ reloc referring to it. */
++ BFD_ASSERT((unsigned long)(irela - relocs) < sec->reloc_count);
++
++ ind_data = r_data;
++ r_data = &rd->reloc_data[irela - relocs];
++ }
++ else
++ {
++ r_symndx = ELF_R_SYM(rela->r_info);
++ addend = rela->r_addend;
++ }
++
++ /* Get the value of the symbol referred to by the reloc. */
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ Elf_Internal_Sym *isym;
++
++ isym = isymbuf + r_symndx;
++ symval = 0;
++
++ RDBG(" local sym %lu: ", r_symndx);
++
++ if (isym->st_shndx == SHN_UNDEF)
++ defined = FALSE;
++ else if (isym->st_shndx == SHN_ABS)
++ sym_sec = bfd_abs_section_ptr;
++ else if (isym->st_shndx == SHN_COMMON)
++ sym_sec = bfd_com_section_ptr;
++ else
++ sym_sec = bfd_section_from_elf_index(abfd, isym->st_shndx);
++
++ symval = isym->st_value;
++ sym_type = ELF_ST_TYPE(isym->st_info);
++ target_frag = local_sym_frag[r_symndx];
++
++ if (local_got_ents)
++ got = local_got_ents[r_symndx];
++ else
++ got = NULL;
++ }
++ else
++ {
++ /* Global symbol */
++ unsigned long index;
++ struct elf_link_hash_entry *h;
++ struct elf_avr32_link_hash_entry *havr;
++
++ index = r_symndx - symtab_hdr->sh_info;
++ h = elf_sym_hashes(abfd)[index];
++ BFD_ASSERT(h != NULL);
++
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *)h->root.u.i.link;
++
++ havr = (struct elf_avr32_link_hash_entry *)h;
++ got = h->got.glist;
++
++ symval = 0;
++
++ RDBG(" %s: ", h->root.root.string);
++
++ if (h->root.type != bfd_link_hash_defined
++ && h->root.type != bfd_link_hash_defweak)
++ {
++ RDBG("(undef)");
++ defined = FALSE;
++ }
++ else if ((info->shared && !info->symbolic && h->dynindx != -1)
++ || (htab->root.dynamic_sections_created
++ && h->def_dynamic && !h->def_regular))
++ {
++ RDBG("(dynamic)");
++ dynamic = TRUE;
++ sym_sec = h->root.u.def.section;
++ }
++ else
++ {
++ sym_sec = h->root.u.def.section;
++ symval = h->root.u.def.value;
++ target_frag = havr->sym_frag;
++ }
++
++ sym_type = h->type;
++ }
++
++ /* Thanks to elf32-ppc for this one. */
++ if (sym_sec && sym_sec->sec_info_type == ELF_INFO_TYPE_MERGE)
++ {
++ /* At this stage in linking, no SEC_MERGE symbol has been
++ adjusted, so all references to such symbols need to be
++ passed through _bfd_merged_section_offset. (Later, in
++ relocate_section, all SEC_MERGE symbols *except* for
++ section symbols have been adjusted.)
++
++ SEC_MERGE sections are not relaxed by us, as they
++ shouldn't contain any code. */
++
++ BFD_ASSERT(!target_frag && !(r_data && r_data->add_frag));
++
++ /* gas may reduce relocations against symbols in SEC_MERGE
++ sections to a relocation against the section symbol when
++ the original addend was zero. When the reloc is against
++ a section symbol we should include the addend in the
++ offset passed to _bfd_merged_section_offset, since the
++ location of interest is the original symbol. On the
++ other hand, an access to "sym+addend" where "sym" is not
++ a section symbol should not include the addend; Such an
++ access is presumed to be an offset from "sym"; The
++ location of interest is just "sym". */
++ RDBG("\n MERGE: %s: 0x%lx+0x%lx+0x%lx -> ",
++ (sym_type == STT_SECTION)?"section":"not section",
++ sym_sec->output_section->vma + sym_sec->output_offset,
++ symval, addend);
++
++ if (sym_type == STT_SECTION)
++ symval += addend;
++
++ symval = (_bfd_merged_section_offset
++ (abfd, &sym_sec,
++ elf_section_data(sym_sec)->sec_info, symval));
++
++ if (sym_type != STT_SECTION)
++ symval += addend;
++ }
++ else
++ symval += addend;
++
++ if (defined && !dynamic)
++ {
++ RDBG("0x%lx+0x%lx",
++ sym_sec->output_section->vma + sym_sec->output_offset,
++ symval);
++ symval += sym_sec->output_section->vma + sym_sec->output_offset;
++ }
++
++ if (r_data && r_data->add_frag)
++ /* If the add_frag pointer is set, it means that this reloc
++ has an addend that may be affected by relaxation. */
++ target_frag = r_data->add_frag;
++
++ if (target_frag)
++ {
++ symval += target_frag->offset_adjust;
++
++ /* If target_frag comes after this frag in the same
++ section, we should assume that it will be moved by
++ the same amount we are. */
++ if ((target_frag - rd->frag) < (int)rd->frag_count
++ && target_frag > frag)
++ symval += delta;
++ }
++
++ distance = symval - addr;
++
++ /* First, try to make a direct reference. If the symbol is
++ dynamic or undefined, we must take care not to change its
++ reference type, that is, we can't make it direct.
++
++ Also, it seems like some sections may actually be resized
++ after the relaxation code is done, so we can't really
++ trust that our "distance" is correct. There's really no
++ easy solution to this problem, so we'll just disallow
++ direct references to SEC_DATA sections.
++
++ Oh, and .bss isn't actually SEC_DATA, so we disallow
++ !SEC_HAS_CONTENTS as well. */
++ if (!dynamic && defined
++ && (htab->direct_data_refs
++ || (!(sym_sec->flags & SEC_DATA)
++ && (sym_sec->flags & SEC_HAS_CONTENTS)))
++ && next_state->direct)
++ {
++ next_state = &relax_state[next_state->direct];
++ RDBG(" D-> %s", next_state->name);
++ }
++
++ /* Iterate backwards until we find a state that fits. */
++ while (next_state->prev
++ && !rs_is_good_enough(next_state, frag, symval, addr,
++ got, ind_data, delta))
++ {
++ next_state = &relax_state[next_state->prev];
++ RDBG(" P-> %s", next_state->name);
++ }
++
++ /* Then try to find the best possible state. */
++ while (next_state->next)
++ {
++ const struct relax_state *candidate;
++
++ candidate = &relax_state[next_state->next];
++ if (!rs_is_good_enough(candidate, frag, symval, addr, got,
++ ind_data, delta))
++ break;
++
++ next_state = candidate;
++ RDBG(" N-> %s", next_state->name);
++ }
++
++ RDBG(" [size %ld]\n", next_state->size);
++
++ BFD_ASSERT(next_state->id);
++ BFD_ASSERT(!dynamic || next_state->reftype == REF_GOT);
++
++ size_adjust = next_state->size - state->size;
++
++ /* There's a theoretical possibility that shrinking one frag
++ may cause another to grow, which may cause the first one to
++ grow as well, and we're back where we started. Avoid this
++ scenario by disallowing a frag that has grown to ever
++ shrink again. */
++ if (state->reftype == REF_GOT && next_state->reftype != REF_GOT)
++ {
++ if (frag->has_grown)
++ next_state = state;
++ else
++ unref_got_entry(htab, got);
++ }
++ else if (state->reftype != REF_GOT && next_state->reftype == REF_GOT)
++ {
++ ref_got_entry(htab, got);
++ frag->has_grown = TRUE;
++ }
++ else if (state->reftype == REF_CPOOL
++ && next_state->reftype != REF_CPOOL)
++ {
++ if (frag->has_grown)
++ next_state = state;
++ else
++ ind_data->add_frag->refcount--;
++ }
++ else if (state->reftype != REF_CPOOL
++ && next_state->reftype == REF_CPOOL)
++ {
++ ind_data->add_frag->refcount++;
++ frag->has_grown = TRUE;
++ }
++ else
++ {
++ if (frag->has_grown && size_adjust < 0)
++ next_state = state;
++ else if (size_adjust > 0)
++ frag->has_grown = TRUE;
++ }
++
++ size_adjust = next_state->size - state->size;
++ frag->state = next_state->id;
++
++ break;
++ }
++
++ if (size_adjust)
++ htab->repeat_pass = TRUE;
++
++ frag->size_adjust += size_adjust;
++ sec->size += size_adjust;
++ delta += size_adjust;
++
++ BFD_ASSERT((frag->offset + frag->offset_adjust
++ + frag->size + frag->size_adjust)
++ == (frag[1].offset + frag[1].offset_adjust + delta));
++ }
++
++ ret = TRUE;
++
++ out:
++ release_local_syms(abfd, isymbuf);
++ release_internal_relocs(sec, relocs);
++ return ret;
++}
++
++static bfd_boolean
++adjust_global_symbol(struct elf_avr32_link_hash_entry *havr,
++ struct bfd_link_info *info ATTRIBUTE_UNUSED)
++{
++ struct elf_link_hash_entry *h = &havr->root;
++
++ if (havr->sym_frag && (h->root.type == bfd_link_hash_defined
++ || h->root.type == bfd_link_hash_defweak))
++ {
++ RDBG("adjust_global_symbol: %s 0x%08lx -> 0x%08lx\n",
++ h->root.root.string, h->root.u.def.value,
++ h->root.u.def.value + havr->sym_frag->offset_adjust);
++ h->root.u.def.value += havr->sym_frag->offset_adjust;
++ }
++ return TRUE;
++}
++
++static bfd_boolean
++adjust_syms(struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ bfd *abfd;
++
++ htab = avr32_elf_hash_table(info);
++ elf_link_hash_traverse(&htab->root, adjust_global_symbol, info);
++
++ for (abfd = info->input_bfds; abfd; abfd = abfd->link_next)
++ {
++ Elf_Internal_Sym *isymbuf;
++ struct fragment **local_sym_frag, *frag;
++ unsigned int i, sym_count;
++
++ sym_count = elf_tdata(abfd)->symtab_hdr.sh_info;
++ if (sym_count == 0)
++ continue;
++
++ isymbuf = retrieve_local_syms(abfd, info->keep_memory);
++ if (!isymbuf)
++ return FALSE;
++
++ local_sym_frag = elf_tdata(abfd)->local_sym_frag;
++
++ for (i = 0; i < sym_count; i++)
++ {
++ frag = local_sym_frag[i];
++ if (frag)
++ {
++ RDBG("adjust_local_symbol: %s[%u] 0x%08lx -> 0x%08lx\n",
++ abfd->filename, i, isymbuf[i].st_value,
++ isymbuf[i].st_value + frag->offset_adjust);
++ isymbuf[i].st_value += frag->offset_adjust;
++ }
++ }
++
++ release_local_syms(abfd, isymbuf);
++ }
++
++ htab->symbols_adjusted = TRUE;
++ return TRUE;
++}
++
++static bfd_boolean
++adjust_relocs(bfd *abfd, asection *sec, struct bfd_link_info *info)
++{
++ struct avr32_relax_data *rd;
++ Elf_Internal_Rela *relocs;
++ Elf_Internal_Shdr *symtab_hdr;
++ unsigned int i;
++ bfd_boolean ret = FALSE;
++
++ rd = avr32_relax_data(sec);
++ if (!rd->reloc_data)
++ return TRUE;
++
++ RDBG("adjust_relocs: %s<%s> (count: %u)\n", abfd->filename, sec->name,
++ sec->reloc_count);
++
++ relocs = retrieve_internal_relocs(abfd, sec, info->keep_memory);
++ if (!relocs)
++ return FALSE;
++
++ symtab_hdr = &elf_tdata(abfd)->symtab_hdr;
++
++ for (i = 0; i < sec->reloc_count; i++)
++ {
++ Elf_Internal_Rela *rela = &relocs[i];
++ struct avr32_reloc_data *r_data = &rd->reloc_data[i];
++ struct fragment *sym_frag;
++ unsigned long r_symndx;
++
++ if (r_data->add_frag)
++ {
++ r_symndx = ELF_R_SYM(rela->r_info);
++
++ if (r_symndx < symtab_hdr->sh_info)
++ sym_frag = elf_tdata(abfd)->local_sym_frag[r_symndx];
++ else
++ {
++ struct elf_link_hash_entry *h;
++
++ h = elf_sym_hashes(abfd)[r_symndx - symtab_hdr->sh_info];
++
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *)h->root.u.i.link;
++
++ BFD_ASSERT(h->root.type == bfd_link_hash_defined
++ || h->root.type == bfd_link_hash_defweak);
++
++ sym_frag = ((struct elf_avr32_link_hash_entry *)h)->sym_frag;
++ }
++
++ RDBG(" addend: 0x%08lx -> 0x%08lx\n",
++ rela->r_addend,
++ rela->r_addend + r_data->add_frag->offset_adjust
++ - (sym_frag ? sym_frag->offset_adjust : 0));
++
++ /* If this is against a section symbol, we won't find any
++ sym_frag, so we'll just adjust the addend. */
++ rela->r_addend += r_data->add_frag->offset_adjust;
++ if (sym_frag)
++ rela->r_addend -= sym_frag->offset_adjust;
++
++ if (r_data->sub_frag)
++ {
++ bfd_byte *contents;
++ bfd_signed_vma diff;
++
++ contents = retrieve_contents(abfd, sec, info->keep_memory);
++ if (!contents)
++ goto out;
++
++ /* I realize now that sub_frag is misnamed. It's
++ actually add_frag which is subtracted in this
++ case... */
++ diff = bfd_get_signed_32(abfd, contents + rela->r_offset);
++ diff += (r_data->sub_frag->offset_adjust
++ - r_data->add_frag->offset_adjust);
++ bfd_put_32(abfd, diff, contents + rela->r_offset);
++
++ RDBG(" 0x%lx: DIFF32 updated: 0x%lx\n", rela->r_offset, diff);
++
++ release_contents(sec, contents);
++ }
++ }
++ else
++ BFD_ASSERT(!r_data->sub_frag);
++ }
++
++ ret = TRUE;
++
++ out:
++ release_internal_relocs(sec, relocs);
++ return ret;
++}
++
++static bfd_boolean
++avr32_move_data(bfd *abfd, asection *sec, struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ struct avr32_relax_data *rd;
++ struct fragment *frag, *fragend;
++ Elf_Internal_Rela *relocs = NULL;
++ bfd_byte *contents = NULL;
++ unsigned int i;
++ bfd_boolean ret = FALSE;
++
++ htab = avr32_elf_hash_table(info);
++ rd = avr32_relax_data(sec);
++
++ if (!htab->symbols_adjusted)
++ if (!adjust_syms(info))
++ return FALSE;
++
++ if (rd->is_relaxable)
++ {
++ /* Resize the section first, so that we can be sure that enough
++ memory is allocated in case the section has grown. */
++ if (sec->size > sec->rawsize
++ && elf_section_data(sec)->this_hdr.contents)
++ {
++ /* We must not use cached data if the section has grown. */
++ free(elf_section_data(sec)->this_hdr.contents);
++ elf_section_data(sec)->this_hdr.contents = NULL;
++ }
++
++ relocs = retrieve_internal_relocs(abfd, sec, info->keep_memory);
++ if (!relocs)
++ goto out;
++ contents = retrieve_contents(abfd, sec, info->keep_memory);
++ if (!contents)
++ goto out;
++
++ fragend = rd->frag + rd->frag_count;
++
++ RDBG("move_data: %s<%s>: relocs=%p, contents=%p\n",
++ abfd->filename, sec->name, relocs, contents);
++
++ /* First, move the data into place. We must take care to move
++ frags in the right order so that we don't accidentally
++ overwrite parts of the next frag. */
++ for (frag = rd->frag; frag < fragend; frag++)
++ {
++ RDBG(" 0x%08lx%c0x%x: size 0x%lx%c0x%x\n",
++ frag->offset, frag->offset_adjust >= 0 ? '+' : '-',
++ abs(frag->offset_adjust),
++ frag->size, frag->size_adjust >= 0 ? '+' : '-',
++ abs(frag->size_adjust));
++ if (frag->offset_adjust > 0)
++ {
++ struct fragment *prev = frag - 1;
++ struct fragment *last;
++
++ for (last = frag; last < fragend && last->offset_adjust > 0;
++ last++) ;
++
++ if (last == fragend)
++ last--;
++
++ for (frag = last; frag != prev; frag--)
++ {
++ if (frag->offset_adjust
++ && frag->size + frag->size_adjust > 0)
++ {
++ RDBG("memmove 0x%lx -> 0x%lx (size %lu)\n",
++ frag->offset, frag->offset + frag->offset_adjust,
++ frag->size + frag->size_adjust);
++ memmove(contents + frag->offset + frag->offset_adjust,
++ contents + frag->offset,
++ frag->size + frag->size_adjust);
++ }
++ }
++ frag = last;
++ }
++ else if (frag->offset_adjust && frag->size + frag->size_adjust > 0)
++ {
++ RDBG("memmove 0x%lx -> 0x%lx (size %lu)\n",
++ frag->offset, frag->offset + frag->offset_adjust,
++ frag->size + frag->size_adjust);
++ memmove(contents + frag->offset + frag->offset_adjust,
++ contents + frag->offset,
++ frag->size + frag->size_adjust);
++ }
++ }
++
++ i = 0;
++
++ for (frag = rd->frag; frag < fragend; frag++)
++ {
++ const struct relax_state *state, *istate;
++ struct avr32_reloc_data *r_data = NULL;
++
++ istate = &relax_state[frag->initial_state];
++ state = &relax_state[frag->state];
++
++ if (rd->reloc_data)
++ r_data = &rd->reloc_data[frag->rela - relocs];
++
++ BFD_ASSERT((long)(frag->size + frag->size_adjust) >= 0);
++ BFD_ASSERT(state->reftype != REF_CPOOL
++ || r_data->add_frag->refcount > 0);
++
++ if (istate->reftype == REF_CPOOL && state->reftype != REF_CPOOL)
++ {
++ struct fragment *ifrag;
++
++ /* An indirect reference through the cpool has been
++ converted to a direct reference. We must update the
++ reloc to point to the symbol itself instead of the
++ constant pool entry. The reloc type will be updated
++ later. */
++ ifrag = r_data->add_frag;
++ frag->rela->r_info = ifrag->rela->r_info;
++ frag->rela->r_addend = ifrag->rela->r_addend;
++
++ /* Copy the reloc data so the addend will be adjusted
++ correctly later. */
++ *r_data = rd->reloc_data[ifrag->rela - relocs];
++ }
++
++ /* Move all relocs covered by this frag. */
++ if (frag->rela)
++ BFD_ASSERT(&relocs[i] <= frag->rela);
++ else
++ BFD_ASSERT((frag + 1) == fragend && frag->state == RS_NONE);
++
++ if (frag == rd->frag)
++ BFD_ASSERT(i == 0);
++ else
++ BFD_ASSERT(&relocs[i] > frag[-1].rela);
++
++ /* If non-null, frag->rela is the last relocation in the
++ fragment. frag->rela can only be null in the last
++ fragment, so in that case, we'll just do the rest. */
++ for (; (i < sec->reloc_count
++ && (!frag->rela || &relocs[i] <= frag->rela)); i++)
++ {
++ RDBG("[%4u] r_offset 0x%08lx -> 0x%08lx\n", i, relocs[i].r_offset,
++ relocs[i].r_offset + frag->offset_adjust);
++ relocs[i].r_offset += frag->offset_adjust;
++ }
++
++ if (frag->refcount == 0)
++ {
++ /* If this frag is to be discarded, make sure we won't
++ relocate it later on. */
++ BFD_ASSERT(frag->state == RS_CPENT);
++ frag->rela->r_info = ELF_R_INFO(ELF_R_SYM(frag->rela->r_info),
++ R_AVR32_NONE);
++ }
++ else if (frag->state == RS_ALIGN)
++ {
++ bfd_vma addr, addr_end;
++
++ addr = frag->rela->r_offset;
++ addr_end = (frag->offset + frag->offset_adjust
++ + frag->size + frag->size_adjust);
++
++ /* If the section is executable, insert NOPs.
++ Otherwise, insert zeroes. */
++ if (sec->flags & SEC_CODE)
++ {
++ if (addr & 1)
++ {
++ bfd_put_8(abfd, 0, contents + addr);
++ addr++;
++ }
++
++ BFD_ASSERT(!((addr_end - addr) & 1));
++
++ while (addr < addr_end)
++ {
++ bfd_put_16(abfd, NOP_OPCODE, contents + addr);
++ addr += 2;
++ }
++ }
++ else
++ memset(contents + addr, 0, addr_end - addr);
++ }
++ else if (state->opcode_mask)
++ {
++ bfd_vma insn;
++
++ /* Update the opcode and the relocation type unless it's a
++ "special" relax state (i.e. RS_NONE, RS_ALIGN or
++ RS_CPENT.), in which case the opcode mask is zero. */
++ insn = bfd_get_32(abfd, contents + frag->rela->r_offset);
++ insn &= ~state->opcode_mask;
++ insn |= state->opcode;
++ RDBG(" 0x%lx: inserting insn %08lx\n",
++ frag->rela->r_offset, insn);
++ bfd_put_32(abfd, insn, contents + frag->rela->r_offset);
++
++ frag->rela->r_info = ELF_R_INFO(ELF_R_SYM(frag->rela->r_info),
++ state->r_type);
++ }
++
++ if ((frag + 1) == fragend)
++ BFD_ASSERT((frag->offset + frag->size + frag->offset_adjust
++ + frag->size_adjust) == sec->size);
++ else
++ BFD_ASSERT((frag->offset + frag->size + frag->offset_adjust
++ + frag->size_adjust)
++ == (frag[1].offset + frag[1].offset_adjust));
++ }
++ }
++
++ /* Adjust reloc addends and DIFF32 differences */
++ if (!adjust_relocs(abfd, sec, info))
++ return FALSE;
++
++ ret = TRUE;
++
++ out:
++ release_contents(sec, contents);
++ release_internal_relocs(sec, relocs);
++ return ret;
++}
++
++static bfd_boolean
++avr32_elf_relax_section(bfd *abfd, asection *sec,
++ struct bfd_link_info *info, bfd_boolean *again)
++{
++ struct elf_avr32_link_hash_table *htab;
++ struct avr32_relax_data *rd;
++
++ *again = FALSE;
++ if (info->relocatable)
++ return TRUE;
++
++ htab = avr32_elf_hash_table(info);
++ if ((!(sec->flags & SEC_RELOC) || sec->reloc_count == 0)
++ && sec != htab->sgot)
++ return TRUE;
++
++ if (!htab->relocations_analyzed)
++ {
++ if (!analyze_relocations(info))
++ return FALSE;
++ htab->relocations_analyzed = TRUE;
++ }
++
++ rd = avr32_relax_data(sec);
++
++ if (rd->iteration != htab->relax_iteration)
++ {
++ if (!htab->repeat_pass)
++ htab->relax_pass++;
++ htab->relax_iteration++;
++ htab->repeat_pass = FALSE;
++ }
++
++ rd->iteration++;
++
++ switch (htab->relax_pass)
++ {
++ case RELAX_PASS_SIZE_FRAGS:
++ if (!avr32_size_frags(abfd, sec, info))
++ return FALSE;
++ *again = TRUE;
++ break;
++ case RELAX_PASS_MOVE_DATA:
++ if (!avr32_move_data(abfd, sec, info))
++ return FALSE;
++ break;
++ }
++
++ return TRUE;
++}
++
++
++/* Relocation */
++
++static bfd_reloc_status_type
++avr32_check_reloc_value(asection *sec, Elf_Internal_Rela *rela,
++ bfd_signed_vma relocation, reloc_howto_type *howto);
++static bfd_reloc_status_type
++avr32_final_link_relocate(reloc_howto_type *howto, bfd *input_bfd,
++ asection *input_section, bfd_byte *contents,
++ Elf_Internal_Rela *rel, bfd_vma value);
++static bfd_boolean
++avr32_elf_relocate_section(bfd *output_bfd, struct bfd_link_info *info,
++ bfd *input_bfd, asection *input_section,
++ bfd_byte *contents, Elf_Internal_Rela *relocs,
++ Elf_Internal_Sym *local_syms,
++ asection **local_sections);
++
++
++#define symbol_address(symbol) \
++ symbol->value + symbol->section->output_section->vma \
++ + symbol->section->output_offset
++
++#define avr32_elf_insert_field(size, field, abfd, reloc_entry, data) \
++ do \
++ { \
++ unsigned long x; \
++ x = bfd_get_##size (abfd, data + reloc_entry->address); \
++ x &= ~reloc_entry->howto->dst_mask; \
++ x |= field & reloc_entry->howto->dst_mask; \
++ bfd_put_##size (abfd, (bfd_vma) x, data + reloc_entry->address); \
++ } \
++ while(0)
++
++static bfd_reloc_status_type
++avr32_check_reloc_value(asection *sec ATTRIBUTE_UNUSED,
++ Elf_Internal_Rela *rela ATTRIBUTE_UNUSED,
++ bfd_signed_vma relocation,
++ reloc_howto_type *howto)
++{
++ bfd_vma reloc_u;
++
++ /* We take "complain_overflow_dont" to mean "don't complain on
++ alignment either". This way, we don't have to special-case
++ R_AVR32_HI16 */
++ if (howto->complain_on_overflow == complain_overflow_dont)
++ return bfd_reloc_ok;
++
++ /* Check if the value is correctly aligned */
++ if (relocation & ((1 << howto->rightshift) - 1))
++ {
++ RDBG("misaligned: %s<%s+%lx>: %s: 0x%lx (align %u)\n",
++ sec->owner->filename, sec->name, rela->r_offset,
++ howto->name, relocation, howto->rightshift);
++ return bfd_reloc_overflow;
++ }
++
++ /* Now, get rid of the unnecessary bits */
++ relocation >>= howto->rightshift;
++ reloc_u = (bfd_vma)relocation;
++
++ switch (howto->complain_on_overflow)
++ {
++ case complain_overflow_unsigned:
++ case complain_overflow_bitfield:
++ if (reloc_u > (unsigned long)((1 << howto->bitsize) - 1))
++ {
++ RDBG("unsigned overflow: %s<%s+%lx>: %s: 0x%lx (size %u)\n",
++ sec->owner->filename, sec->name, rela->r_offset,
++ howto->name, reloc_u, howto->bitsize);
++ RDBG("reloc vma: 0x%lx\n",
++ sec->output_section->vma + sec->output_offset + rela->r_offset);
++
++ return bfd_reloc_overflow;
++ }
++ break;
++ case complain_overflow_signed:
++ if (relocation > (1 << (howto->bitsize - 1)) - 1)
++ {
++ RDBG("signed overflow: %s<%s+%lx>: %s: 0x%lx (size %u)\n",
++ sec->owner->filename, sec->name, rela->r_offset,
++ howto->name, reloc_u, howto->bitsize);
++ RDBG("reloc vma: 0x%lx\n",
++ sec->output_section->vma + sec->output_offset + rela->r_offset);
++
++ return bfd_reloc_overflow;
++ }
++ if (relocation < -(1 << (howto->bitsize - 1)))
++ {
++ RDBG("signed overflow: %s<%s+%lx>: %s: -0x%lx (size %u)\n",
++ sec->owner->filename, sec->name, rela->r_offset,
++ howto->name, -relocation, howto->bitsize);
++ RDBG("reloc vma: 0x%lx\n",
++ sec->output_section->vma + sec->output_offset + rela->r_offset);
++
++ return bfd_reloc_overflow;
++ }
++ break;
++ default:
++ abort();
++ }
++
++ return bfd_reloc_ok;
++}
++
++
++static bfd_reloc_status_type
++avr32_final_link_relocate(reloc_howto_type *howto,
++ bfd *input_bfd,
++ asection *input_section,
++ bfd_byte *contents,
++ Elf_Internal_Rela *rel,
++ bfd_vma value)
++{
++ bfd_vma field;
++ bfd_vma relocation;
++ bfd_reloc_status_type status;
++ bfd_byte *p = contents + rel->r_offset;
++ unsigned long x;
++
++ pr_debug(" (6b) final link relocate\n");
++
++ /* Sanity check the address */
++ if (rel->r_offset > input_section->size)
++ {
++ (*_bfd_error_handler)
++ ("%B: %A+0x%lx: offset out of range (section size: 0x%lx)",
++ input_bfd, input_section, rel->r_offset, input_section->size);
++ return bfd_reloc_outofrange;
++ }
++
++ relocation = value + rel->r_addend;
++
++ if (howto->pc_relative)
++ {
++ bfd_vma addr;
++
++ addr = input_section->output_section->vma
++ + input_section->output_offset + rel->r_offset;
++ addr &= ~0UL << howto->rightshift;
++ relocation -= addr;
++ }
++
++ switch (ELF32_R_TYPE(rel->r_info))
++ {
++ case R_AVR32_16N_PCREL:
++ /* sub reg, pc, . - (sym + addend) */
++ relocation = -relocation;
++ break;
++ }
++
++ status = avr32_check_reloc_value(input_section, rel, relocation, howto);
++
++ relocation >>= howto->rightshift;
++ if (howto->bitsize == 21)
++ field = (relocation & 0xffff)
++ | ((relocation & 0x10000) << 4)
++ | ((relocation & 0x1e0000) << 8);
++ else if (howto->bitsize == 12)
++ field = (relocation & 0xff) | ((relocation & 0xf00) << 4);
++ else if (howto->bitsize == 10)
++ field = ((relocation & 0xff) << 4)
++ | ((relocation & 0x300) >> 8);
++ else
++ field = relocation << howto->bitpos;
++
++ switch (howto->size)
++ {
++ case 0:
++ x = bfd_get_8 (input_bfd, p);
++ x &= ~howto->dst_mask;
++ x |= field & howto->dst_mask;
++ bfd_put_8 (input_bfd, (bfd_vma) x, p);
++ break;
++ case 1:
++ x = bfd_get_16 (input_bfd, p);
++ x &= ~howto->dst_mask;
++ x |= field & howto->dst_mask;
++ bfd_put_16 (input_bfd, (bfd_vma) x, p);
++ break;
++ case 2:
++ x = bfd_get_32 (input_bfd, p);
++ x &= ~howto->dst_mask;
++ x |= field & howto->dst_mask;
++ bfd_put_32 (input_bfd, (bfd_vma) x, p);
++ break;
++ default:
++ abort();
++ }
++
++ return status;
++}
++
++/* (6) Apply relocations to the normal (non-dynamic) sections */
++
++static bfd_boolean
++avr32_elf_relocate_section(bfd *output_bfd, struct bfd_link_info *info,
++ bfd *input_bfd, asection *input_section,
++ bfd_byte *contents, Elf_Internal_Rela *relocs,
++ Elf_Internal_Sym *local_syms,
++ asection **local_sections)
++{
++ struct elf_avr32_link_hash_table *htab;
++ Elf_Internal_Shdr *symtab_hdr;
++ Elf_Internal_Rela *rel, *relend;
++ struct elf_link_hash_entry **sym_hashes;
++ struct got_entry **local_got_ents;
++ asection *sgot;
++ asection *srelgot;
++
++ pr_debug("(6) relocate section %s:<%s> (size 0x%lx)\n",
++ input_bfd->filename, input_section->name, input_section->size);
++
++ /* If we're doing a partial link, we don't have to do anything since
++ we're using RELA relocations */
++ if (info->relocatable)
++ return TRUE;
++
++ htab = avr32_elf_hash_table(info);
++ symtab_hdr = &elf_tdata(input_bfd)->symtab_hdr;
++ sym_hashes = elf_sym_hashes(input_bfd);
++ local_got_ents = elf_local_got_ents(input_bfd);
++ sgot = htab->sgot;
++ srelgot = htab->srelgot;
++
++ relend = relocs + input_section->reloc_count;
++ for (rel = relocs; rel < relend; rel++)
++ {
++ unsigned long r_type, r_symndx;
++ reloc_howto_type *howto;
++ Elf_Internal_Sym *sym = NULL;
++ struct elf_link_hash_entry *h = NULL;
++ asection *sec = NULL;
++ bfd_vma value;
++ bfd_vma offset;
++ bfd_reloc_status_type status;
++
++ r_type = ELF32_R_TYPE(rel->r_info);
++ r_symndx = ELF32_R_SYM(rel->r_info);
++
++ if (r_type == R_AVR32_NONE
++ || r_type == R_AVR32_ALIGN
++ || r_type == R_AVR32_DIFF32
++ || r_type == R_AVR32_DIFF16
++ || r_type == R_AVR32_DIFF8)
++ continue;
++
++ /* Sanity check */
++ if (r_type > R_AVR32_max)
++ {
++ bfd_set_error(bfd_error_bad_value);
++ return FALSE;
++ }
++
++ howto = &elf_avr32_howto_table[r_type];
++
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ sym = local_syms + r_symndx;
++ sec = local_sections[r_symndx];
++
++ pr_debug(" (6a) processing %s against local symbol %lu\n",
++ howto->name, r_symndx);
++
++ /* The following function changes rel->r_addend behind our back. */
++ value = _bfd_elf_rela_local_sym(output_bfd, sym, &sec, rel);
++ pr_debug(" => value: %lx, addend: %lx\n", value, rel->r_addend);
++ }
++ else
++ {
++ if (sym_hashes == NULL)
++ return FALSE;
++
++ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *)h->root.u.i.link;
++
++ pr_debug(" (6a) processing %s against symbol %s\n",
++ howto->name, h->root.root.string);
++
++ if (h->root.type == bfd_link_hash_defined
++ || h->root.type == bfd_link_hash_defweak)
++ {
++ bfd_boolean dyn;
++
++ dyn = htab->root.dynamic_sections_created;
++ sec = h->root.u.def.section;
++
++ if (sec->output_section)
++ value = (h->root.u.def.value
++ + sec->output_section->vma
++ + sec->output_offset);
++ else
++ value = h->root.u.def.value;
++ }
++ else if (h->root.type == bfd_link_hash_undefweak)
++ value = 0;
++ else if (info->unresolved_syms_in_objects == RM_IGNORE
++ && ELF_ST_VISIBILITY(h->other) == STV_DEFAULT)
++ value = 0;
++ else
++ {
++ bfd_boolean err;
++ err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
++ || ELF_ST_VISIBILITY(h->other) != STV_DEFAULT);
++ if (!info->callbacks->undefined_symbol
++ (info, h->root.root.string, input_bfd,
++ input_section, rel->r_offset, err))
++ return FALSE;
++ value = 0;
++ }
++
++ pr_debug(" => value: %lx, addend: %lx\n", value, rel->r_addend);
++ }
++
++ switch (r_type)
++ {
++ case R_AVR32_GOT32:
++ case R_AVR32_GOT16:
++ case R_AVR32_GOT8:
++ case R_AVR32_GOT21S:
++ case R_AVR32_GOT18SW:
++ case R_AVR32_GOT16S:
++ case R_AVR32_GOT7UW:
++ case R_AVR32_LDA_GOT:
++ case R_AVR32_GOTCALL:
++ BFD_ASSERT(sgot != NULL);
++
++ if (h != NULL)
++ {
++ BFD_ASSERT(h->got.glist->refcount > 0);
++ offset = h->got.glist->offset;
++
++ BFD_ASSERT(offset < sgot->size);
++ if (!elf_hash_table(info)->dynamic_sections_created
++ || (h->def_regular
++ && (!info->shared
++ || info->symbolic
++ || h->dynindx == -1)))
++ {
++ /* This is actually a static link, or it is a
++ -Bsymbolic link and the symbol is defined
++ locally, or the symbol was forced to be local. */
++ bfd_put_32(output_bfd, value, sgot->contents + offset);
++ }
++ }
++ else
++ {
++ BFD_ASSERT(local_got_ents &&
++ local_got_ents[r_symndx]->refcount > 0);
++ offset = local_got_ents[r_symndx]->offset;
++
++ /* Local GOT entries don't have relocs. If this is a
++ shared library, the dynamic linker will add the load
++ address to the initial value at startup. */
++ BFD_ASSERT(offset < sgot->size);
++ pr_debug("Initializing GOT entry at offset %lu: 0x%lx\n",
++ offset, value);
++ bfd_put_32 (output_bfd, value, sgot->contents + offset);
++ }
++
++ value = sgot->output_offset + offset;
++ pr_debug("GOT reference: New value %lx\n", value);
++ break;
++
++ case R_AVR32_GOTPC:
++ /* This relocation type is for constant pool entries used in
++ the calculation "Rd = PC - (PC - GOT)", where the
++ constant pool supplies the constant (PC - GOT)
++ offset. The symbol value + addend indicates where the
++ value of PC is taken. */
++ value -= sgot->output_section->vma;
++ break;
++
++ case R_AVR32_32_PCREL:
++ /* We must adjust r_offset to account for discarded data in
++ the .eh_frame section. This is probably not the right
++ way to do this, since AFAICS all other architectures do
++ it some other way. I just can't figure out how... */
++ {
++ bfd_vma r_offset;
++
++ r_offset = _bfd_elf_section_offset(output_bfd, info,
++ input_section,
++ rel->r_offset);
++ if (r_offset == (bfd_vma)-1
++ || r_offset == (bfd_vma)-2)
++ continue;
++ rel->r_offset = r_offset;
++ }
++ break;
++
++ case R_AVR32_32:
++ /* We need to emit a run-time relocation in the following cases:
++ - we're creating a shared library
++ - the symbol is not defined in any regular objects
++
++ Of course, sections that aren't going to be part of the
++ run-time image will not get any relocs, and undefined
++ symbols won't have any either (only weak undefined
++ symbols should get this far). */
++ if ((info->shared
++ || (elf_hash_table(info)->dynamic_sections_created
++ && h != NULL
++ && h->def_dynamic
++ && !h->def_regular))
++ && r_symndx != 0
++ && (input_section->flags & SEC_ALLOC))
++ {
++ Elf_Internal_Rela outrel;
++ bfd_byte *loc;
++ bfd_boolean skip, relocate;
++ struct elf_avr32_link_hash_entry *avrh;
++
++ pr_debug("Going to generate dynamic reloc...\n");
++
++ skip = FALSE;
++ relocate = FALSE;
++
++ outrel.r_offset = _bfd_elf_section_offset(output_bfd, info,
++ input_section,
++ rel->r_offset);
++ if (outrel.r_offset == (bfd_vma)-1)
++ skip = TRUE;
++ else if (outrel.r_offset == (bfd_vma)-2)
++ skip = TRUE, relocate = TRUE;
++
++ outrel.r_offset += (input_section->output_section->vma
++ + input_section->output_offset);
++
++ pr_debug(" ... offset %lx, dynindx %ld\n",
++ outrel.r_offset, h ? h->dynindx : -1);
++
++ if (skip)
++ memset(&outrel, 0, sizeof(outrel));
++ else
++ {
++ avrh = (struct elf_avr32_link_hash_entry *)h;
++ /* h->dynindx may be -1 if this symbol was marked to
++ become local. */
++ if (h == NULL
++ || ((info->symbolic || h->dynindx == -1)
++ && h->def_regular))
++ {
++ relocate = TRUE;
++ outrel.r_info = ELF32_R_INFO(0, R_AVR32_RELATIVE);
++ outrel.r_addend = value + rel->r_addend;
++ pr_debug(" ... R_AVR32_RELATIVE\n");
++ }
++ else
++ {
++ BFD_ASSERT(h->dynindx != -1);
++ relocate = TRUE;
++ outrel.r_info = ELF32_R_INFO(h->dynindx, R_AVR32_GLOB_DAT);
++ outrel.r_addend = rel->r_addend;
++ pr_debug(" ... R_AVR32_GLOB_DAT\n");
++ }
++ }
++
++ pr_debug("srelgot reloc_count: %d, size %lu\n",
++ srelgot->reloc_count, srelgot->size);
++
++ loc = srelgot->contents;
++ loc += srelgot->reloc_count++ * sizeof(Elf32_External_Rela);
++ bfd_elf32_swap_reloca_out(output_bfd, &outrel, loc);
++
++ BFD_ASSERT(srelgot->reloc_count * sizeof(Elf32_External_Rela)
++ <= srelgot->size);
++
++ if (!relocate)
++ continue;
++ }
++ break;
++ }
++
++ status = avr32_final_link_relocate(howto, input_bfd, input_section,
++ contents, rel, value);
++
++ switch (status)
++ {
++ case bfd_reloc_ok:
++ break;
++
++ case bfd_reloc_overflow:
++ {
++ const char *name;
++
++ if (h != NULL)
++ name = h->root.root.string;
++ else
++ {
++ name = bfd_elf_string_from_elf_section(input_bfd,
++ symtab_hdr->sh_link,
++ sym->st_name);
++ if (name == NULL)
++ return FALSE;
++ if (*name == '\0')
++ name = bfd_section_name(input_bfd, sec);
++ }
++ if (!((*info->callbacks->reloc_overflow)
++ (info, (h ? &h->root : NULL), name, howto->name,
++ rel->r_addend, input_bfd, input_section, rel->r_offset)))
++ return FALSE;
++ }
++ break;
++
++ case bfd_reloc_outofrange:
++ default:
++ abort();
++ }
++ }
++
++ return TRUE;
++}
++
++
++/* Additional processing of dynamic sections after relocation */
++
++static bfd_boolean
++avr32_elf_finish_dynamic_symbol(bfd *output_bfd, struct bfd_link_info *info,
++ struct elf_link_hash_entry *h,
++ Elf_Internal_Sym *sym);
++static bfd_boolean
++avr32_elf_finish_dynamic_sections(bfd *output_bfd, struct bfd_link_info *info);
++
++
++/* (7) Initialize the contents of a dynamic symbol and/or emit
++ relocations for it */
++
++static bfd_boolean
++avr32_elf_finish_dynamic_symbol(bfd *output_bfd, struct bfd_link_info *info,
++ struct elf_link_hash_entry *h,
++ Elf_Internal_Sym *sym)
++{
++ struct elf_avr32_link_hash_table *htab;
++ struct got_entry *got;
++
++ pr_debug("(7) finish dynamic symbol: %s\n", h->root.root.string);
++
++ htab = avr32_elf_hash_table(info);
++ got = h->got.glist;
++
++ if (got && got->refcount > 0)
++ {
++ asection *sgot;
++ asection *srelgot;
++ Elf_Internal_Rela rel;
++ bfd_byte *loc;
++
++ /* This symbol has an entry in the GOT. Set it up. */
++ sgot = htab->sgot;
++ srelgot = htab->srelgot;
++ BFD_ASSERT(sgot && srelgot);
++
++ rel.r_offset = (sgot->output_section->vma
++ + sgot->output_offset
++ + got->offset);
++
++ /* If this is a static link, or it is a -Bsymbolic link and the
++ symbol is defined locally or was forced to be local because
++ of a version file, we just want to emit a RELATIVE reloc. The
++ entry in the global offset table will already have been
++ initialized in the relocate_section function. */
++ if ((info->shared
++ && !info->symbolic
++ && h->dynindx != -1)
++ || (htab->root.dynamic_sections_created
++ && h->def_dynamic
++ && !h->def_regular))
++ {
++ bfd_put_32(output_bfd, 0, sgot->contents + got->offset);
++ rel.r_info = ELF32_R_INFO(h->dynindx, R_AVR32_GLOB_DAT);
++ rel.r_addend = 0;
++
++ pr_debug("GOT reloc R_AVR32_GLOB_DAT, dynindx: %ld\n", h->dynindx);
++ pr_debug(" srelgot reloc_count: %d, size: %lu\n",
++ srelgot->reloc_count, srelgot->size);
++
++ loc = (srelgot->contents
++ + srelgot->reloc_count++ * sizeof(Elf32_External_Rela));
++ bfd_elf32_swap_reloca_out(output_bfd, &rel, loc);
++
++ BFD_ASSERT(srelgot->reloc_count * sizeof(Elf32_External_Rela)
++ <= srelgot->size);
++ }
++ }
++
++ /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute */
++ if (strcmp(h->root.root.string, "_DYNAMIC") == 0
++ || strcmp(h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
++ sym->st_shndx = SHN_ABS;
++
++ return TRUE;
++}
++
++/* (8) Do any remaining initialization of the dynamic sections */
++
++static bfd_boolean
++avr32_elf_finish_dynamic_sections(bfd *output_bfd, struct bfd_link_info *info)
++{
++ struct elf_avr32_link_hash_table *htab;
++ asection *sgot, *sdyn;
++
++ pr_debug("(8) finish dynamic sections\n");
++
++ htab = avr32_elf_hash_table(info);
++ sgot = htab->sgot;
++ sdyn = bfd_get_section_by_name(htab->root.dynobj, ".dynamic");
++
++ if (htab->root.dynamic_sections_created)
++ {
++ Elf32_External_Dyn *dyncon, *dynconend;
++
++ BFD_ASSERT(sdyn && sgot && sgot->size >= AVR32_GOT_HEADER_SIZE);
++
++ dyncon = (Elf32_External_Dyn *)sdyn->contents;
++ dynconend = (Elf32_External_Dyn *)(sdyn->contents + sdyn->size);
++ for (; dyncon < dynconend; dyncon++)
++ {
++ Elf_Internal_Dyn dyn;
++ asection *s;
++
++ bfd_elf32_swap_dyn_in(htab->root.dynobj, dyncon, &dyn);
++
++ switch (dyn.d_tag)
++ {
++ default:
++ break;
++
++ case DT_PLTGOT:
++ s = sgot->output_section;
++ BFD_ASSERT(s != NULL);
++ dyn.d_un.d_ptr = s->vma;
++ bfd_elf32_swap_dyn_out(output_bfd, &dyn, dyncon);
++ break;
++
++ case DT_AVR32_GOTSZ:
++ s = sgot->output_section;
++ BFD_ASSERT(s != NULL);
++ dyn.d_un.d_val = s->size;
++ bfd_elf32_swap_dyn_out(output_bfd, &dyn, dyncon);
++ break;
++ }
++ }
++
++ /* Fill in the first two entries in the global offset table */
++ bfd_put_32(output_bfd,
++ sdyn->output_section->vma + sdyn->output_offset,
++ sgot->contents);
++
++ /* The runtime linker will fill this one in with the address of
++ the run-time link map */
++ bfd_put_32(output_bfd, 0, sgot->contents + 4);
++ }
++
++ if (sgot)
++ elf_section_data(sgot->output_section)->this_hdr.sh_entsize = 4;
++
++ return TRUE;
++}
++
++
++/* AVR32-specific private ELF data */
++
++static bfd_boolean
++avr32_elf_set_private_flags(bfd *abfd, flagword flags);
++static bfd_boolean
++avr32_elf_copy_private_bfd_data(bfd *ibfd, bfd *obfd);
++static bfd_boolean
++avr32_elf_merge_private_bfd_data(bfd *ibfd, bfd *obfd);
++static bfd_boolean
++avr32_elf_print_private_bfd_data(bfd *abfd, void *ptr);
++
++static bfd_boolean
++avr32_elf_set_private_flags(bfd *abfd, flagword flags)
++{
++ elf_elfheader(abfd)->e_flags = flags;
++ elf_flags_init(abfd) = TRUE;
++
++ return TRUE;
++}
++
++/* Copy backend specific data from one object module to another. */
++
++static bfd_boolean
++avr32_elf_copy_private_bfd_data(bfd *ibfd, bfd *obfd)
++{
++ elf_elfheader(obfd)->e_flags = elf_elfheader(ibfd)->e_flags;
++ return TRUE;
++}
++
++/* Merge backend specific data from an object file to the output
++ object file when linking. */
++
++static bfd_boolean
++avr32_elf_merge_private_bfd_data(bfd *ibfd, bfd *obfd)
++{
++ flagword out_flags, in_flags;
++
++ pr_debug("(0) merge_private_bfd_data: %s -> %s\n",
++ ibfd->filename, obfd->filename);
++
++ in_flags = elf_elfheader(ibfd)->e_flags;
++ out_flags = elf_elfheader(obfd)->e_flags;
++
++ if (elf_flags_init(obfd))
++ {
++ /* If one of the inputs are non-PIC, the output must be
++ considered non-PIC. The same applies to linkrelax. */
++ if (!(in_flags & EF_AVR32_PIC))
++ out_flags &= ~EF_AVR32_PIC;
++ if (!(in_flags & EF_AVR32_LINKRELAX))
++ out_flags &= ~EF_AVR32_LINKRELAX;
++ }
++ else
++ {
++ elf_flags_init(obfd) = TRUE;
++ out_flags = in_flags;
++ }
++
++ elf_elfheader(obfd)->e_flags = out_flags;
++
++ return TRUE;
++}
++
++static bfd_boolean
++avr32_elf_print_private_bfd_data(bfd *abfd, void *ptr)
++{
++ FILE *file = (FILE *)ptr;
++ unsigned long flags;
++
++ BFD_ASSERT(abfd != NULL && ptr != NULL);
++
++ _bfd_elf_print_private_bfd_data(abfd, ptr);
++
++ flags = elf_elfheader(abfd)->e_flags;
++
++ fprintf(file, _("private flags = %lx:"), elf_elfheader(abfd)->e_flags);
++
++ if (flags & EF_AVR32_PIC)
++ fprintf(file, " [PIC]");
++ if (flags & EF_AVR32_LINKRELAX)
++ fprintf(file, " [linker relaxable]");
++
++ flags &= ~(EF_AVR32_PIC | EF_AVR32_LINKRELAX);
++
++ if (flags)
++ fprintf(file, _("<Unrecognized flag bits set>"));
++
++ fputc('\n', file);
++
++ return TRUE;
++}
++
++/* Set avr32-specific linker options. */
++void bfd_elf32_avr32_set_options(struct bfd_link_info *info,
++ int direct_data_refs)
++{
++ struct elf_avr32_link_hash_table *htab;
++
++ htab = avr32_elf_hash_table (info);
++ htab->direct_data_refs = !!direct_data_refs;
++}
++
++
++
++/* Understanding core dumps */
++
++static bfd_boolean
++avr32_elf_grok_prstatus(bfd *abfd, Elf_Internal_Note *note);
++static bfd_boolean
++avr32_elf_grok_psinfo(bfd *abfd, Elf_Internal_Note *note);
++
++static bfd_boolean
++avr32_elf_grok_prstatus(bfd *abfd, Elf_Internal_Note *note)
++{
++ /* Linux/AVR32B elf_prstatus */
++ if (note->descsz != 148)
++ return FALSE;
++
++ /* pr_cursig */
++ elf_tdata(abfd)->core_signal = bfd_get_16(abfd, note->descdata + 12);
++
++ /* pr_pid */
++ elf_tdata(abfd)->core_pid = bfd_get_32(abfd, note->descdata + 24);
++
++ /* Make a ".reg/999" section for pr_reg. The size is for 16
++ general-purpose registers, SR and r12_orig (18 * 4 = 72). */
++ return _bfd_elfcore_make_pseudosection(abfd, ".reg", 72,
++ note->descpos + 72);
++}
++
++static bfd_boolean
++avr32_elf_grok_psinfo(bfd *abfd, Elf_Internal_Note *note)
++{
++ /* Linux/AVR32B elf_prpsinfo */
++ if (note->descsz != 128)
++ return FALSE;
++
++ elf_tdata(abfd)->core_program
++ = _bfd_elfcore_strndup(abfd, note->descdata + 32, 16);
++ elf_tdata(abfd)->core_command
++ = _bfd_elfcore_strndup(abfd, note->descdata + 48, 80);
++
++ /* Note that for some reason, a spurious space is tacked
++ onto the end of the args in some (at least one anyway)
++ implementations, so strip it off if it exists. */
++
++ {
++ char *command = elf_tdata (abfd)->core_command;
++ int n = strlen (command);
++
++ if (0 < n && command[n - 1] == ' ')
++ command[n - 1] = '\0';
++ }
++
++ return TRUE;
++}
++
++
++#define ELF_ARCH bfd_arch_avr32
++#define ELF_MACHINE_CODE EM_AVR32
++#define ELF_MAXPAGESIZE 1024
++
++#define TARGET_BIG_SYM bfd_elf32_avr32_vec
++#define TARGET_BIG_NAME "elf32-avr32"
++
++#define elf_backend_grok_prstatus avr32_elf_grok_prstatus
++#define elf_backend_grok_psinfo avr32_elf_grok_psinfo
++
++/* Only RELA relocations are used */
++#define elf_backend_may_use_rel_p 0
++#define elf_backend_may_use_rela_p 1
++#define elf_backend_default_use_rela_p 1
++#define elf_backend_rela_normal 1
++#define elf_info_to_howto_rel NULL
++#define elf_info_to_howto avr32_info_to_howto
++
++#define bfd_elf32_bfd_copy_private_bfd_data avr32_elf_copy_private_bfd_data
++#define bfd_elf32_bfd_merge_private_bfd_data avr32_elf_merge_private_bfd_data
++#define bfd_elf32_bfd_set_private_flags avr32_elf_set_private_flags
++#define bfd_elf32_bfd_print_private_bfd_data avr32_elf_print_private_bfd_data
++#define bfd_elf32_new_section_hook avr32_elf_new_section_hook
++
++#define elf_backend_gc_mark_hook avr32_elf_gc_mark_hook
++#define elf_backend_gc_sweep_hook avr32_elf_gc_sweep_hook
++#define elf_backend_relocate_section avr32_elf_relocate_section
++#define elf_backend_copy_indirect_symbol avr32_elf_copy_indirect_symbol
++#define elf_backend_create_dynamic_sections avr32_elf_create_dynamic_sections
++#define bfd_elf32_bfd_link_hash_table_create avr32_elf_link_hash_table_create
++#define elf_backend_adjust_dynamic_symbol avr32_elf_adjust_dynamic_symbol
++#define elf_backend_size_dynamic_sections avr32_elf_size_dynamic_sections
++#define elf_backend_finish_dynamic_symbol avr32_elf_finish_dynamic_symbol
++#define elf_backend_finish_dynamic_sections avr32_elf_finish_dynamic_sections
++
++#define bfd_elf32_bfd_relax_section avr32_elf_relax_section
++
++/* Find out which symbols need an entry in .got. */
++#define elf_backend_check_relocs avr32_check_relocs
++#define elf_backend_can_refcount 1
++#define elf_backend_can_gc_sections 1
++#define elf_backend_plt_readonly 1
++#define elf_backend_plt_not_loaded 1
++#define elf_backend_want_plt_sym 0
++#define elf_backend_plt_alignment 2
++#define elf_backend_want_dynbss 0
++#define elf_backend_want_got_plt 0
++#define elf_backend_want_got_sym 1
++#define elf_backend_got_header_size AVR32_GOT_HEADER_SIZE
++
++#include "elf32-target.h"
+--- /dev/null
++++ b/bfd/elf32-avr32.h
+@@ -0,0 +1,23 @@
++/* AVR32-specific support for 32-bit ELF.
++ Copyright 2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++void bfd_elf32_avr32_set_options(struct bfd_link_info *info,
++ int direct_data_refs);
+--- a/bfd/elf-bfd.h
++++ b/bfd/elf-bfd.h
+@@ -1503,6 +1503,10 @@ struct elf_obj_tdata
+ find_nearest_line. */
+ struct mips_elf_find_line *find_line_info;
+
++ /* Used by AVR32 ELF relaxation code. Contains an array of pointers
++ for each local symbol to the fragment where it is defined. */
++ struct fragment **local_sym_frag;
++
+ /* A place to stash dwarf1 info for this bfd. */
+ struct dwarf1_debug *dwarf1_find_line_info;
+
+--- a/bfd/Makefile.am
++++ b/bfd/Makefile.am
+@@ -73,6 +73,7 @@ ALL_MACHINES = \
+ cpu-arc.lo \
+ cpu-arm.lo \
+ cpu-avr.lo \
++ cpu-avr32.lo \
+ cpu-bfin.lo \
+ cpu-cr16.lo \
+ cpu-cr16c.lo \
+@@ -269,6 +270,7 @@ BFD32_BACKENDS = \
+ elf32-arc.lo \
+ elf32-arm.lo \
+ elf32-avr.lo \
++ elf32-avr32.lo \
+ elf32-bfin.lo \
+ elf32-cr16.lo \
+ elf32-cr16c.lo \
+--- a/bfd/reloc.c
++++ b/bfd/reloc.c
+@@ -4052,6 +4052,131 @@ ENUMDOC
+ instructions
+
+ ENUM
++ BFD_RELOC_AVR32_DIFF32
++ENUMX
++ BFD_RELOC_AVR32_DIFF16
++ENUMX
++ BFD_RELOC_AVR32_DIFF8
++ENUMDOC
++ Difference between two labels: L2 - L1. The value of L1 is encoded
++ as sym + addend, while the initial difference after assembly is
++ inserted into the object file by the assembler.
++ENUM
++ BFD_RELOC_AVR32_GOT32
++ENUMX
++ BFD_RELOC_AVR32_GOT16
++ENUMX
++ BFD_RELOC_AVR32_GOT8
++ENUMDOC
++ Reference to a symbol through the Global Offset Table. The linker
++ will allocate an entry for symbol in the GOT and insert the offset
++ of this entry as the relocation value.
++ENUM
++ BFD_RELOC_AVR32_21S
++ENUMX
++ BFD_RELOC_AVR32_16U
++ENUMX
++ BFD_RELOC_AVR32_16S
++ENUMX
++ BFD_RELOC_AVR32_SUB5
++ENUMX
++ BFD_RELOC_AVR32_8S_EXT
++ENUMX
++ BFD_RELOC_AVR32_8S
++ENUMX
++ BFD_RELOC_AVR32_15S
++ENUMDOC
++ Normal (non-pc-relative) code relocations. Alignment and signedness
++ is indicated by the suffixes. S means signed, U means unsigned. W
++ means word-aligned, H means halfword-aligned, neither means
++ byte-aligned (no alignment.) SUB5 is the same relocation as 16S.
++ENUM
++ BFD_RELOC_AVR32_22H_PCREL
++ENUMX
++ BFD_RELOC_AVR32_18W_PCREL
++ENUMX
++ BFD_RELOC_AVR32_16B_PCREL
++ENUMX
++ BFD_RELOC_AVR32_16N_PCREL
++ENUMX
++ BFD_RELOC_AVR32_14UW_PCREL
++ENUMX
++ BFD_RELOC_AVR32_11H_PCREL
++ENUMX
++ BFD_RELOC_AVR32_10UW_PCREL
++ENUMX
++ BFD_RELOC_AVR32_9H_PCREL
++ENUMX
++ BFD_RELOC_AVR32_9UW_PCREL
++ENUMDOC
++ PC-relative relocations are signed if neither 'U' nor 'S' is
++ specified. However, we explicitly tack on a 'B' to indicate no
++ alignment, to avoid confusion with data relocs. All of these resolve
++ to sym + addend - offset, except the one with 'N' (negated) suffix.
++ This particular one resolves to offset - sym - addend.
++ENUM
++ BFD_RELOC_AVR32_GOTPC
++ENUMDOC
++ Subtract the link-time address of the GOT from (symbol + addend)
++ and insert the result.
++ENUM
++ BFD_RELOC_AVR32_GOTCALL
++ENUMX
++ BFD_RELOC_AVR32_LDA_GOT
++ENUMX
++ BFD_RELOC_AVR32_GOT21S
++ENUMX
++ BFD_RELOC_AVR32_GOT18SW
++ENUMX
++ BFD_RELOC_AVR32_GOT16S
++ENUMDOC
++ Reference to a symbol through the GOT. The linker will allocate an
++ entry for symbol in the GOT and insert the offset of this entry as
++ the relocation value. addend must be zero. As usual, 'S' means
++ signed, 'W' means word-aligned, etc.
++ENUM
++ BFD_RELOC_AVR32_32_CPENT
++ENUMDOC
++ 32-bit constant pool entry. I don't think 8- and 16-bit entries make
++ a whole lot of sense.
++ENUM
++ BFD_RELOC_AVR32_CPCALL
++ENUMX
++ BFD_RELOC_AVR32_16_CP
++ENUMX
++ BFD_RELOC_AVR32_9W_CP
++ENUMDOC
++ Constant pool references. Some of these relocations are signed,
++ others are unsigned. It doesn't really matter, since the constant
++ pool always comes after the code that references it.
++ENUM
++ BFD_RELOC_AVR32_ALIGN
++ENUMDOC
++ sym must be the absolute symbol. The addend specifies the alignment
++ order, e.g. if addend is 2, the linker must add padding so that the
++ next address is aligned to a 4-byte boundary.
++ENUM
++ BFD_RELOC_AVR32_14UW
++ENUMX
++ BFD_RELOC_AVR32_10UW
++ENUMX
++ BFD_RELOC_AVR32_10SW
++ENUMX
++ BFD_RELOC_AVR32_STHH_W
++ENUMX
++ BFD_RELOC_AVR32_7UW
++ENUMX
++ BFD_RELOC_AVR32_6S
++ENUMX
++ BFD_RELOC_AVR32_6UW
++ENUMX
++ BFD_RELOC_AVR32_4UH
++ENUMX
++ BFD_RELOC_AVR32_3U
++ENUMDOC
++ Code relocations that will never make it to the output file.
++
++ENUM
+ BFD_RELOC_390_12
+ ENUMDOC
+ Direct 12 bit.
+--- a/bfd/targets.c
++++ b/bfd/targets.c
+@@ -568,6 +568,7 @@ extern const bfd_target b_out_vec_big_ho
+ extern const bfd_target b_out_vec_little_host;
+ extern const bfd_target bfd_pei_ia64_vec;
+ extern const bfd_target bfd_elf32_avr_vec;
++extern const bfd_target bfd_elf32_avr32_vec;
+ extern const bfd_target bfd_elf32_bfin_vec;
+ extern const bfd_target bfd_elf32_bfinfdpic_vec;
+ extern const bfd_target bfd_elf32_big_generic_vec;
+@@ -896,6 +897,7 @@ static const bfd_target * const _bfd_tar
+ &bfd_pei_ia64_vec,
+ #endif
+ &bfd_elf32_avr_vec,
++ &bfd_elf32_avr32_vec,
+ &bfd_elf32_bfin_vec,
+ &bfd_elf32_bfinfdpic_vec,
+
+--- a/binutils/doc/binutils.info
++++ b/binutils/doc/binutils.info
+@@ -1665,6 +1665,10 @@ equivalent. At least one option from th
+ useful when attempting to disassemble thumb code produced by other
+ compilers.
+
++ For the AVR32 architectures that support Floating point unit (FPU),
++ specifying '-M decode-fpu' will enable disassembler to print the
++ floating point instruction instead of 'cop' instructions.
++
+ For the x86, some of the options duplicate functions of the `-m'
+ switch, but allow finer grained control. Multiple selections from
+ the following may be specified as a comma separated string.
+--- a/binutils/doc/binutils.texi
++++ b/binutils/doc/binutils.texi
+@@ -1935,6 +1935,10 @@ using the switch @option{--disassembler-
+ useful when attempting to disassemble thumb code produced by other
+ compilers.
+
++For the AVR32 architectures that support a floating point unit (FPU),
++specifying @option{-M decode-fpu} will enable the disassembler to print
++the floating point instructions instead of 'cop' instructions.
++
+ For the x86, some of the options duplicate functions of the @option{-m}
+ switch, but allow finer grained control. Multiple selections from the
+ following may be specified as a comma separated string.
+--- a/binutils/doc/objdump.1
++++ b/binutils/doc/objdump.1
+@@ -425,6 +425,10 @@ using the switch \fB\-\-disassembler\-op
+ useful when attempting to disassemble thumb code produced by other
+ compilers.
+ .Sp
++For the \s-1AVR32\s0 architectures that support a floating point unit (FPU),
++specifying \fB\-M decode\-fpu\fR will enable the disassembler to print
++the floating point instructions instead of 'cop' instructions.
++.Sp
+ For the x86, some of the options duplicate functions of the \fB\-m\fR
+ switch, but allow finer grained control. Multiple selections from the
+ following may be specified as a comma separated string.
+--- a/binutils/readelf.c
++++ b/binutils/readelf.c
+@@ -94,6 +94,7 @@
+ #include "elf/arc.h"
+ #include "elf/arm.h"
+ #include "elf/avr.h"
++#include "elf/avr32.h"
+ #include "elf/bfin.h"
+ #include "elf/cr16.h"
+ #include "elf/cris.h"
+@@ -570,6 +571,7 @@ guess_is_rela (unsigned int e_machine)
+ case EM_ALPHA:
+ case EM_ALTERA_NIOS2:
+ case EM_AVR:
++ case EM_AVR32:
+ case EM_AVR_OLD:
+ case EM_BLACKFIN:
+ case EM_CR16:
+@@ -1020,6 +1022,10 @@ dump_relocations (FILE * file,
+ rtype = elf_avr_reloc_type (type);
+ break;
+
++ case EM_AVR32:
++ rtype = elf_avr32_reloc_type (type);
++ break;
++
+ case EM_OLD_SPARCV9:
+ case EM_SPARC32PLUS:
+ case EM_SPARCV9:
+@@ -1853,6 +1859,7 @@ get_machine_name (unsigned e_machine)
+ case EM_VAX: return "Digital VAX";
+ case EM_AVR_OLD:
+ case EM_AVR: return "Atmel AVR 8-bit microcontroller";
++ case EM_AVR32: return "Atmel AVR32 32-bit microprocessor";
+ case EM_CRIS: return "Axis Communications 32-bit embedded processor";
+ case EM_JAVELIN: return "Infineon Technologies 32-bit embedded cpu";
+ case EM_FIREPATH: return "Element 14 64-bit DSP processor";
+--- a/gas/as.c
++++ b/gas/as.c
+@@ -445,10 +445,10 @@ parse_args (int * pargc, char *** pargv)
+ the end of the preceeding line so that it is simpler to
+ selectively add and remove lines from this list. */
+ {"alternate", no_argument, NULL, OPTION_ALTERNATE}
+- /* The entry for "a" is here to prevent getopt_long_only() from
+- considering that -a is an abbreviation for --alternate. This is
+- necessary because -a=<FILE> is a valid switch but getopt would
+- normally reject it since --alternate does not take an argument. */
++ /* The next two entries are here to prevent getopt_long_only() from
++ considering that -a or -al is an abbreviation for --alternate.
++ This is necessary because -a=<FILE> is a valid switch but getopt
++ would normally reject it since --alternate does not take an argument. */
+ ,{"a", optional_argument, NULL, 'a'}
+ /* Handle -al=<FILE>. */
+ ,{"al", optional_argument, NULL, OPTION_AL}
+@@ -811,8 +811,15 @@ This program has absolutely no warranty.
+ case 'a':
+ if (optarg)
+ {
+- if (optarg != old_argv[optind] && optarg[-1] == '=')
++ /* If optarg is part of the -a switch and not a separate argument
++ in its own right, then scan backwards to the just after the -a.
++ This means skipping over both '=' and 'l' which might have been
++ taken to be part of the -a switch itself. */
++ if (optarg != old_argv[optind])
++ {
++ while (optarg[-1] == '=' || optarg[-1] == 'l')
+ --optarg;
++ }
+
+ if (md_parse_option (optc, optarg) != 0)
+ break;
+@@ -1245,7 +1252,7 @@ main (int argc, char ** argv)
+ keep_it = 0;
+
+ if (!keep_it)
+- unlink_if_ordinary (out_file_name);
++ unlink (out_file_name);
+
+ input_scrub_end ();
+
+--- a/gas/as.h
++++ b/gas/as.h
+@@ -110,6 +110,7 @@ typedef int * va_list;
+ #endif
+ #define gas_assert(P) \
+ ((void) ((P) ? 0 : (as_assert (__FILE__, __LINE__, __PRETTY_FUNCTION__), 0)))
++#define assert(P) gas_assert(P)
+ #undef abort
+ #define abort() as_abort (__FILE__, __LINE__, __PRETTY_FUNCTION__)
+
+--- a/gas/atof-generic.c
++++ b/gas/atof-generic.c
+@@ -121,6 +121,21 @@ atof_generic (/* return pointer to just
+
+ switch (first_digit[0])
+ {
++ case 's':
++ case 'S':
++ case 'q':
++ case 'Q':
++ if (!strncasecmp ("nan", first_digit+1, 3))
++ {
++ address_of_generic_floating_point_number->sign = 0;
++ address_of_generic_floating_point_number->exponent = 0;
++ address_of_generic_floating_point_number->leader =
++ address_of_generic_floating_point_number->low;
++ *address_of_string_pointer = first_digit + 4;
++ return 0;
++ }
++ break;
++
+ case 'n':
+ case 'N':
+ if (!strncasecmp ("nan", first_digit, 3))
+--- a/gas/config/atof-vax.c
++++ b/gas/config/atof-vax.c
+@@ -268,9 +268,27 @@ flonum_gen2vax (int format_letter, /* On
+ int exponent_skippage;
+ LITTLENUM_TYPE word1;
+
+- /* JF: Deal with new Nan, +Inf and -Inf codes. */
++ /* JF: Deal with new +/-(q/Q/s/S)Nan, +Inf and -Inf codes. */
+ if (f->sign != '-' && f->sign != '+')
+ {
++ if (f->sign == 0)
++ {
++ /* All NaNs are 0. */
++ memset (words, 0x00, sizeof (LITTLENUM_TYPE) * precision);
++ }
++ else if (f->sign == 'P')
++ {
++ /* Positive Infinity. */
++ memset (words, 0xff, sizeof (LITTLENUM_TYPE) * precision);
++ words[0] &= 0x7fff;
++ }
++ else if (f->sign == 'N')
++ {
++ /* Negative Infinity. */
++ memset (words, 0x00, sizeof (LITTLENUM_TYPE) * precision);
++ words[0] = 0x0080;
++ }
++ else
+ make_invalid_floating_point_number (words);
+ return return_value;
+ }
+--- /dev/null
++++ b/gas/config/tc-avr32.c
+@@ -0,0 +1,4839 @@
++/* Assembler implementation for AVR32.
++ Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of GAS, the GNU Assembler.
++
++ GAS is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2, or (at your option)
++ any later version.
++
++ GAS is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GAS; see the file COPYING. If not, write to the Free
++ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#include <stdio.h>
++#include "as.h"
++#include "safe-ctype.h"
++#include "subsegs.h"
++#include "symcat.h"
++#include "opcodes/avr32-opc.h"
++#include "opcodes/avr32-asm.h"
++#include "elf/avr32.h"
++#include "dwarf2dbg.h"
++
++#define xDEBUG
++#define xOPC_CONSISTENCY_CHECK
++
++#ifdef DEBUG
++# define pr_debug(fmt, args...) fprintf(stderr, fmt, ##args)
++#else
++# define pr_debug(fmt, args...)
++#endif
++
++/* 3 MSB of instruction word indicate group. Group 7 -> extended */
++#define AVR32_COMPACT_P(opcode) ((opcode[0] & 0xe0) != 0xe0)
++
++#define streq(a, b) (strcmp(a, b) == 0)
++#define skip_whitespace(str) do { while(*(str) == ' ') ++(str); } while(0)
++
++/* Flags given on the command line */
++static int avr32_pic = FALSE;
++int linkrelax = FALSE;
++int avr32_iarcompat = FALSE;
++
++/* This array holds the chars that always start a comment. */
++const char comment_chars[] = "#";
++
++/* This array holds the chars that only start a comment at the
++ beginning of a line. We must include '#' here because the compiler
++ may produce #APP and #NO_APP in its output. */
++const char line_comment_chars[] = "#";
++
++/* These may be used instead of newline (same as ';' in C). */
++const char line_separator_chars[] = ";";
++
++/* Chars that can be used to separate mantissa from exponent in
++ floating point numbers. */
++const char EXP_CHARS[] = "eE";
++
++/* Chars that mean this number is a floating point constant. */
++const char FLT_CHARS[] = "dD";
++
++/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
++symbolS *GOT_symbol;
++
++static struct hash_control *avr32_mnemonic_htab;
++
++struct avr32_ifield_data
++{
++ bfd_vma value;
++ /* FIXME: Get rid of align_order and complain. complain is never
++ used, align_order is used in one place. Try to use the relax
++ table instead. */
++ unsigned int align_order;
++};
++
++struct avr32_insn
++{
++ const struct avr32_syntax *syntax;
++ expressionS immediate;
++ int pcrel;
++ int force_extended;
++ unsigned int next_slot;
++ bfd_reloc_code_real_type r_type;
++ struct avr32_ifield_data field_value[AVR32_MAX_FIELDS];
++};
++
++static struct avr32_insn current_insn;
++
++/* The target specific pseudo-ops we support. */
++static void s_rseg (int);
++static void s_cpool(int);
++
++const pseudo_typeS md_pseudo_table[] =
++{
++ /* Make sure that .word is 32 bits */
++ { "word", cons, 4 },
++ { "file", (void (*) PARAMS ((int))) dwarf2_directive_file, 0 },
++ { "loc", dwarf2_directive_loc, 0 },
++
++ /* .lcomm requires an explicit alignment parameter */
++ { "lcomm", s_lcomm, 1 },
++
++ /* AVR32-specific pseudo-ops */
++ { "cpool", s_cpool, 0},
++
++ /* IAR compatible pseudo-ops */
++ { "program", s_ignore, 0 },
++ { "public", s_globl, 0 },
++ { "extern", s_ignore, 0 },
++ { "module", s_ignore, 0 },
++ { "rseg", s_rseg, 0 },
++ { "dc8", cons, 1 },
++ { "dc16", cons, 2 },
++ { "dc32", cons, 4 },
++
++ { NULL, NULL, 0 }
++};
++
++/* Questionable stuff starts here */
++
++enum avr32_opinfo {
++ AVR32_OPINFO_NONE = BFD_RELOC_NONE,
++ AVR32_OPINFO_GOT,
++ AVR32_OPINFO_TLSGD,
++ AVR32_OPINFO_HI,
++ AVR32_OPINFO_LO,
++};
++
++enum avr32_arch {
++ ARCH_TYPE_AP,
++ ARCH_TYPE_UCR1,
++ ARCH_TYPE_UCR2,
++ ARCH_TYPE_UCR3,
++ ARCH_TYPE_UCR3FP
++};
++
++struct arch_type_s
++{
++ /* Architecture name */
++ char *name;
++ /* Instruction Set Architecture Flags */
++ unsigned long isa_flags;
++};
++
++struct part_type_s
++{
++ /* Part name */
++ char *name;
++ /* Architecture type */
++ unsigned int arch;
++};
++
++static struct arch_type_s arch_types[] =
++{
++ {"ap", AVR32_V1 | AVR32_SIMD | AVR32_DSP | AVR32_PICO},
++ {"ucr1", AVR32_V1 | AVR32_DSP | AVR32_RMW},
++ {"ucr2", AVR32_V1 | AVR32_V2 | AVR32_DSP | AVR32_RMW},
++ {"ucr3", AVR32_V1 | AVR32_V2 | AVR32_V3 | AVR32_DSP | AVR32_RMW},
++ {"ucr3fp", AVR32_V1 | AVR32_V2 | AVR32_V3 | AVR32_DSP | AVR32_RMW | AVR32_V3FP},
++ {"all-insn", AVR32_V1 | AVR32_V2 | AVR32_V3 | AVR32_SIMD | AVR32_DSP | AVR32_RMW | AVR32_V3FP | AVR32_PICO},
++ {NULL, 0}
++};
++
++static struct part_type_s part_types[] =
++{
++ {"ap7000", ARCH_TYPE_AP},
++ {"ap7001", ARCH_TYPE_AP},
++ {"ap7002", ARCH_TYPE_AP},
++ {"ap7200", ARCH_TYPE_AP},
++ {"uc3a0128", ARCH_TYPE_UCR2},
++ {"uc3a0256", ARCH_TYPE_UCR2},
++ {"uc3a0512es", ARCH_TYPE_UCR1},
++ {"uc3a0512", ARCH_TYPE_UCR2},
++ {"uc3a1128", ARCH_TYPE_UCR2},
++ {"uc3a1256es", ARCH_TYPE_UCR1},
++ {"uc3a1256", ARCH_TYPE_UCR2},
++ {"uc3a1512es", ARCH_TYPE_UCR1},
++ {"uc3a1512", ARCH_TYPE_UCR2},
++ {"uc3a364", ARCH_TYPE_UCR2},
++ {"uc3a364s", ARCH_TYPE_UCR2},
++ {"uc3a3128", ARCH_TYPE_UCR2},
++ {"uc3a3128s", ARCH_TYPE_UCR2},
++ {"uc3a3256", ARCH_TYPE_UCR2},
++ {"uc3a3256s", ARCH_TYPE_UCR2},
++ {"uc3b064", ARCH_TYPE_UCR1},
++ {"uc3b0128", ARCH_TYPE_UCR1},
++ {"uc3b0256es", ARCH_TYPE_UCR1},
++ {"uc3b0256", ARCH_TYPE_UCR1},
++ {"uc3b0512", ARCH_TYPE_UCR2},
++ {"uc3b0512revc", ARCH_TYPE_UCR2},
++ {"uc3b164", ARCH_TYPE_UCR1},
++ {"uc3b1128", ARCH_TYPE_UCR1},
++ {"uc3b1256", ARCH_TYPE_UCR1},
++ {"uc3b1256es", ARCH_TYPE_UCR1},
++ {"uc3b1512", ARCH_TYPE_UCR2},
++ {"uc3b1512revc", ARCH_TYPE_UCR2},
++ {"uc3c0512crevc", ARCH_TYPE_UCR3},
++ {"uc3c1512crevc", ARCH_TYPE_UCR3},
++ {"uc3c2512crevc", ARCH_TYPE_UCR3},
++ {"atuc3l0256", ARCH_TYPE_UCR3},
++ {"mxt768e", ARCH_TYPE_UCR3},
++ {"uc3l064", ARCH_TYPE_UCR3},
++ {"uc3l032", ARCH_TYPE_UCR3},
++ {"uc3l016", ARCH_TYPE_UCR3},
++ {"uc3l064revb", ARCH_TYPE_UCR3},
++ {"uc3c064c", ARCH_TYPE_UCR3FP},
++ {"uc3c0128c", ARCH_TYPE_UCR3FP},
++ {"uc3c0256c", ARCH_TYPE_UCR3FP},
++ {"uc3c0512c", ARCH_TYPE_UCR3FP},
++ {"uc3c164c", ARCH_TYPE_UCR3FP},
++ {"uc3c1128c", ARCH_TYPE_UCR3FP},
++ {"uc3c1256c", ARCH_TYPE_UCR3FP},
++ {"uc3c1512c", ARCH_TYPE_UCR3FP},
++ {"uc3c264c", ARCH_TYPE_UCR3FP},
++ {"uc3c2128c", ARCH_TYPE_UCR3FP},
++ {"uc3c2256c", ARCH_TYPE_UCR3FP},
++ {"uc3c2512c", ARCH_TYPE_UCR3FP},
++ {NULL, 0}
++};
++
++/* Current architecture type. */
++static struct arch_type_s default_arch = {"all-insn", AVR32_V1 | AVR32_V2 | AVR32_V3 | AVR32_SIMD | AVR32_DSP | AVR32_RMW | AVR32_V3FP | AVR32_PICO };
++static struct arch_type_s *avr32_arch = &default_arch;
++
++/* Display nicely formatted list of known part- and architecture names. */
++
++static void
++show_arch_list (FILE *stream)
++{
++ int i, x;
++
++ fprintf (stream, _("Architectures supported by the assembler:"));
++ x = 1000;
++
++ for (i = 0; arch_types[i].name; i++)
++ {
++ int len = strlen (arch_types[i].name);
++
++ x += len + 1;
++
++ if (x < 75)
++ fprintf (stream, " %s", arch_types[i].name);
++ else
++ {
++ fprintf (stream, "\n %s", arch_types[i].name);
++ x = len + 2;
++ }
++ }
++
++ fprintf (stream, "\n");
++}
++
++static void
++show_part_list (FILE *stream)
++{
++ int i, x;
++
++ fprintf (stream, _("Known part names:"));
++ x = 1000;
++
++ for (i = 0; part_types[i].name; i++)
++ {
++ int len = strlen(part_types[i].name);
++
++ x += len + 1;
++
++ if (x < 75)
++ fprintf (stream, " %s", part_types[i].name);
++ else
++ {
++ fprintf(stream, "\n %s", part_types[i].name);
++ x = len + 2;
++ }
++ }
++
++ fprintf (stream, "\n");
++}
++
++const char *md_shortopts = "";
++struct option md_longopts[] =
++{
++#define OPTION_ARCH (OPTION_MD_BASE)
++#define OPTION_PART (OPTION_ARCH + 1)
++#define OPTION_IAR (OPTION_PART + 1)
++#define OPTION_PIC (OPTION_IAR + 1)
++#define OPTION_NOPIC (OPTION_PIC + 1)
++#define OPTION_LINKRELAX (OPTION_NOPIC + 1)
++#define OPTION_NOLINKRELAX (OPTION_LINKRELAX + 1)
++#define OPTION_DIRECT_DATA_REFS (OPTION_NOLINKRELAX + 1)
++ {"march", required_argument, NULL, OPTION_ARCH},
++ {"mpart", required_argument, NULL, OPTION_PART},
++ {"iar", no_argument, NULL, OPTION_IAR},
++ {"pic", no_argument, NULL, OPTION_PIC},
++ {"no-pic", no_argument, NULL, OPTION_NOPIC},
++ {"linkrelax", no_argument, NULL, OPTION_LINKRELAX},
++ {"no-linkrelax", no_argument, NULL, OPTION_NOLINKRELAX},
++ /* deprecated alias for -mpart=xxx */
++ {"mcpu", required_argument, NULL, OPTION_PART},
++ {NULL, no_argument, NULL, 0}
++};
++
++size_t md_longopts_size = sizeof (md_longopts);
++
++void
++md_show_usage (FILE *stream)
++{
++ fprintf (stream, _("\
++AVR32 options:\n\
++ -march=[arch-name] Select cpu architecture. [Default `all-insn']\n\
++ -mpart=[part-name] Select specific part. [Default `none']\n\
++ --pic Produce Position-Independent Code\n\
++ --no-pic Don't produce Position-Independent Code\n\
++ --linkrelax Produce output suitable for linker relaxing\n\
++ --no-linkrelax Don't produce output suitable for linker relaxing\n"));
++ show_arch_list(stream);
++}
++
++int
++md_parse_option (int c, char *arg ATTRIBUTE_UNUSED)
++{
++ switch (c)
++ {
++ case OPTION_ARCH:
++ {
++ int i;
++ char *s = alloca (strlen (arg) + 1);
++
++ {
++ char *t = s;
++ char *arg1 = arg;
++
++ do
++ *t = TOLOWER (*arg1++);
++ while (*t++);
++ }
++
++ /* Add backward compability */
++ if (strcmp ("uc", s)== 0)
++ {
++ as_warn("Deprecated arch `%s' specified. "
++ "Please use '-march=ucr1' instead. "
++ "Using to arch 'ucr1'\n",
++ s);
++ s="ucr1";
++ }
++
++ for (i = 0; arch_types[i].name; ++i)
++ if (strcmp (arch_types[i].name, s) == 0)
++ break;
++
++ if (!arch_types[i].name)
++ {
++ show_arch_list (stderr);
++ as_fatal (_("unknown architecture: %s\n"), arg);
++ }
++
++ avr32_arch = &arch_types[i];
++ break;
++ }
++ case OPTION_PART:
++ {
++ int i;
++ char *s = alloca (strlen (arg) + 1);
++ char *t = s;
++ char *p = arg;
++
++ /* If arch type has already been set, don't bother.
++ -march= always overrides -mpart= */
++ if (avr32_arch != &default_arch)
++ break;
++
++ do
++ *t = TOLOWER (*p++);
++ while (*t++);
++
++ for (i = 0; part_types[i].name; ++i)
++ if (strcmp (part_types[i].name, s) == 0)
++ break;
++
++ if (!part_types[i].name)
++ {
++ show_part_list (stderr);
++ as_fatal (_("unknown part: %s\n"), arg);
++ }
++
++ avr32_arch = &arch_types[part_types[i].arch];
++ break;
++ }
++ case OPTION_IAR:
++ avr32_iarcompat = 1;
++ break;
++ case OPTION_PIC:
++ avr32_pic = 1;
++ break;
++ case OPTION_NOPIC:
++ avr32_pic = 0;
++ break;
++ case OPTION_LINKRELAX:
++ linkrelax = 1;
++ break;
++ case OPTION_NOLINKRELAX:
++ linkrelax = 0;
++ break;
++ default:
++ return 0;
++ }
++ return 1;
++}
++
++/* Can't use symbol_new here, so have to create a symbol and then at
++ a later date assign it a value. Thats what these functions do.
++
++ Shamelessly stolen from ARM. */
++
++static void
++symbol_locate (symbolS * symbolP,
++ const char * name, /* It is copied, the caller can modify. */
++ segT segment, /* Segment identifier (SEG_<something>). */
++ valueT valu, /* Symbol value. */
++ fragS * frag) /* Associated fragment. */
++{
++ unsigned int name_length;
++ char * preserved_copy_of_name;
++
++ name_length = strlen (name) + 1; /* +1 for \0. */
++ obstack_grow (&notes, name, name_length);
++ preserved_copy_of_name = obstack_finish (&notes);
++#ifdef STRIP_UNDERSCORE
++ if (preserved_copy_of_name[0] == '_')
++ preserved_copy_of_name++;
++#endif
++
++#ifdef tc_canonicalize_symbol_name
++ preserved_copy_of_name =
++ tc_canonicalize_symbol_name (preserved_copy_of_name);
++#endif
++
++ S_SET_NAME (symbolP, preserved_copy_of_name);
++
++ S_SET_SEGMENT (symbolP, segment);
++ S_SET_VALUE (symbolP, valu);
++ symbol_clear_list_pointers (symbolP);
++
++ symbol_set_frag (symbolP, frag);
++
++ /* Link to end of symbol chain. */
++ {
++ extern int symbol_table_frozen;
++
++ if (symbol_table_frozen)
++ abort ();
++ }
++
++ symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
++
++ obj_symbol_new_hook (symbolP);
++
++#ifdef tc_symbol_new_hook
++ tc_symbol_new_hook (symbolP);
++#endif
++
++#ifdef DEBUG_SYMS
++ verify_symbol_chain (symbol_rootP, symbol_lastP);
++#endif /* DEBUG_SYMS */
++}
++
++struct cpool_entry
++{
++ int refcount;
++ offsetT offset;
++ expressionS exp;
++};
++
++struct cpool
++{
++ struct cpool *next;
++ int used;
++ struct cpool_entry *literals;
++ unsigned int padding;
++ unsigned int next_free_entry;
++ unsigned int id;
++ symbolS *symbol;
++ segT section;
++ subsegT sub_section;
++};
++
++struct cpool *cpool_list = NULL;
++
++static struct cpool *
++find_cpool(segT section, subsegT sub_section)
++{
++ struct cpool *pool;
++
++ for (pool = cpool_list; pool != NULL; pool = pool->next)
++ {
++ if (!pool->used
++ && pool->section == section
++ && pool->sub_section == sub_section)
++ break;
++ }
++
++ return pool;
++}
++
++static struct cpool *
++find_or_make_cpool(segT section, subsegT sub_section)
++{
++ static unsigned int next_cpool_id = 0;
++ struct cpool *pool;
++
++ pool = find_cpool(section, sub_section);
++
++ if (!pool)
++ {
++ pool = xmalloc(sizeof(*pool));
++ if (!pool)
++ return NULL;
++
++ pool->used = 0;
++ pool->literals = NULL;
++ pool->padding = 0;
++ pool->next_free_entry = 0;
++ pool->section = section;
++ pool->sub_section = sub_section;
++ pool->next = cpool_list;
++ pool->symbol = NULL;
++
++ cpool_list = pool;
++ }
++
++ /* NULL symbol means that the pool is new or has just been emptied. */
++ if (!pool->symbol)
++ {
++ pool->symbol = symbol_create(FAKE_LABEL_NAME, undefined_section,
++ 0, &zero_address_frag);
++ pool->id = next_cpool_id++;
++ }
++
++ return pool;
++}
++
++static struct cpool *
++add_to_cpool(expressionS *exp, unsigned int *index, int ref)
++{
++ struct cpool *pool;
++ unsigned int entry;
++
++ pool = find_or_make_cpool(now_seg, now_subseg);
++
++ /* Check if this constant is already in the pool. */
++ for (entry = 0; entry < pool->next_free_entry; entry++)
++ {
++ if ((pool->literals[entry].exp.X_op == exp->X_op)
++ && (exp->X_op == O_constant)
++ && (pool->literals[entry].exp.X_add_number
++ == exp->X_add_number)
++ && (pool->literals[entry].exp.X_unsigned
++ == exp->X_unsigned))
++ break;
++
++ if ((pool->literals[entry].exp.X_op == exp->X_op)
++ && (exp->X_op == O_symbol)
++ && (pool->literals[entry].exp.X_add_number
++ == exp->X_add_number)
++ && (pool->literals[entry].exp.X_add_symbol
++ == exp->X_add_symbol)
++ && (pool->literals[entry].exp.X_op_symbol
++ == exp->X_op_symbol))
++ break;
++ }
++
++ /* Create an entry if we didn't find a match */
++ if (entry == pool->next_free_entry)
++ {
++ pool->literals = xrealloc(pool->literals,
++ sizeof(struct cpool_entry) * (entry + 1));
++ pool->literals[entry].exp = *exp;
++ pool->literals[entry].refcount = 0;
++ pool->next_free_entry++;
++ }
++
++ if (index)
++ *index = entry;
++ if (ref)
++ pool->literals[entry].refcount++;
++
++ return pool;
++}
++
++struct avr32_operand
++{
++ int id;
++ int is_signed;
++ int is_pcrel;
++ int align_order;
++ int (*match)(char *str);
++ void (*parse)(const struct avr32_operand *op, char *str, int opindex);
++};
++
++static int
++match_anything(char *str ATTRIBUTE_UNUSED)
++{
++ return 1;
++}
++
++static int
++match_intreg(char *str)
++{
++ int regid, ret = 1;
++
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++
++ pr_debug("match_intreg: `%s': %d\n", str, ret);
++
++ return ret;
++}
++
++static int
++match_intreg_predec(char *str)
++{
++ int regid;
++
++ if (str[0] != '-' || str[1] != '-')
++ return 0;
++
++ regid = avr32_parse_intreg(str + 2);
++ if (regid < 0)
++ return 0;
++
++ return 1;
++}
++
++static int
++match_intreg_postinc(char *str)
++{
++ int regid, ret = 1;
++ char *p, c;
++
++ for (p = str; *p; p++)
++ if (*p == '+')
++ break;
++
++ if (p[0] != '+' || p[1] != '+')
++ return 0;
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++
++ *p = c;
++ return ret;
++}
++
++static int
++match_intreg_lsl(char *str)
++{
++ int regid, ret = 1;
++ char *p, c;
++
++ for (p = str; *p; p++)
++ if (*p == '<')
++ break;
++
++ if (p[0] && p[1] != '<')
++ return 0;
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++
++ *p = c;
++ return ret;
++}
++
++static int
++match_intreg_lsr(char *str)
++{
++ int regid, ret = 1;
++ char *p, c;
++
++ for (p = str; *p; p++)
++ if (*p == '>')
++ break;
++
++ if (p[0] && p[1] != '>')
++ return 0;
++
++ c = *p, *p = 0;
++
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++
++ *p = c;
++ return ret;
++}
++
++static int
++match_intreg_part(char *str)
++{
++ int regid, ret = 1;
++ char *p, c;
++
++ for (p = str; *p; p++)
++ if (*p == ':')
++ break;
++
++ if (p[0] != ':' || !ISPRINT(p[1]) || p[2] != '\0')
++ return 0;
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++
++ *p = c;
++
++ return ret;
++}
++
++#define match_intreg_disp match_anything
++
++static int
++match_intreg_index(char *str)
++{
++ int regid, ret = 1;
++ char *p, *end, c;
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ /* don't allow empty displacement here (it makes no sense) */
++ if (p[0] != '[')
++ return 0;
++
++ for (end = p + 1; *end; end++) ;
++ if (*(--end) != ']')
++ return 0;
++
++ c = *end, *end = 0;
++ if (!match_intreg_lsl(p + 1))
++ ret = 0;
++ *end = c;
++
++ if (ret)
++ {
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++ *p = c;
++ }
++
++ return ret;
++}
++
++static int
++match_intreg_xindex(char *str)
++{
++ int regid, ret = 1;
++ char *p, *end, c;
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ /* empty displacement makes no sense here either */
++ if (p[0] != '[')
++ return 0;
++
++ for (end = p + 1; *end; end++)
++ if (*end == '<')
++ break;
++
++ if (!streq(end, "<<2]"))
++ return 0;
++
++ c = *end, *end = 0;
++ if (!match_intreg_part(p + 1))
++ ret = 0;
++ *end = c;
++
++ if (ret)
++ {
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ ret = 0;
++ *p = c;
++ }
++
++ return ret;
++}
++
++/* The PC_UDISP_W operator may show up as a label or as a pc[disp]
++ expression. So there's no point in attempting to match this... */
++#define match_pc_disp match_anything
++
++static int
++match_sp(char *str)
++{
++ /* SP in any form will do */
++ return avr32_parse_intreg(str) == AVR32_REG_SP;
++}
++
++static int
++match_sp_disp(char *str)
++{
++ int regid, ret = 1;
++ char *p, c;
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ /* allow empty displacement, meaning zero */
++ if (p[0] == '[')
++ {
++ char *end;
++ for (end = p + 1; *end; end++) ;
++ if (end[-1] != ']')
++ return 0;
++ }
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ if (regid != AVR32_REG_SP)
++ ret = 0;
++
++ *p = c;
++ return ret;
++}
++
++static int
++match_cpno(char *str)
++{
++ if (strncasecmp(str, "cp", 2) != 0)
++ return 0;
++ return 1;
++}
++
++static int
++match_cpreg(char *str)
++{
++ if (strncasecmp(str, "cr", 2) != 0)
++ return 0;
++ return 1;
++}
++
++/* We allow complex expressions, and register names may show up as
++ symbols. Just make sure immediate expressions are always matched
++ last. */
++#define match_const match_anything
++#define match_jmplabel match_anything
++#define match_number match_anything
++
++/* Mnemonics that take reglists never accept anything else */
++#define match_reglist8 match_anything
++#define match_reglist9 match_anything
++#define match_reglist16 match_anything
++#define match_reglist_ldm match_anything
++#define match_reglist_cp8 match_anything
++#define match_reglist_cpd8 match_anything
++
++/* Ditto for retval, jospinc and mcall */
++#define match_retval match_anything
++#define match_jospinc match_anything
++#define match_mcall match_anything
++
++/* COH is used to select between two different syntaxes */
++static int
++match_coh(char *str)
++{
++ return strcasecmp(str, "coh") == 0;
++}
++#if 0
++static int
++match_fpreg(char *str)
++{
++ unsigned long regid;
++ char *endptr;
++
++ if ((str[0] != 'f' && str[0] != 'F')
++ || (str[1] != 'r' && str[1] != 'R'))
++ return 0;
++
++ str += 2;
++ regid = strtoul(str, &endptr, 10);
++ if (!*str || *endptr)
++ return 0;
++
++ return 1;
++}
++#endif
++
++static int
++match_picoreg(char *str)
++{
++ int regid;
++
++ regid = avr32_parse_picoreg(str);
++ if (regid < 0)
++ return 0;
++ return 1;
++}
++
++#define match_pico_reglist_w match_anything
++#define match_pico_reglist_d match_anything
++
++static int
++match_pico_in(char *str)
++{
++ unsigned long regid;
++ char *end;
++
++ if (strncasecmp(str, "in", 2) != 0)
++ return 0;
++
++ str += 2;
++ regid = strtoul(str, &end, 10);
++ if (!*str || *end)
++ return 0;
++
++ return 1;
++}
++
++static int
++match_pico_out0(char *str)
++{
++ if (strcasecmp(str, "out0") != 0)
++ return 0;
++ return 1;
++}
++
++static int
++match_pico_out1(char *str)
++{
++ if (strcasecmp(str, "out1") != 0)
++ return 0;
++ return 1;
++}
++
++static int
++match_pico_out2(char *str)
++{
++ if (strcasecmp(str, "out2") != 0)
++ return 0;
++ return 1;
++}
++
++static int
++match_pico_out3(char *str)
++{
++ if (strcasecmp(str, "out3") != 0)
++ return 0;
++ return 1;
++}
++
++static void parse_nothing(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str ATTRIBUTE_UNUSED,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ /* Do nothing (this is used for "match-only" operands like COH) */
++}
++
++static void
++parse_const(const struct avr32_operand *op, char *str,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ expressionS *exp = &current_insn.immediate;
++ expressionS *sym_exp;
++ int slot;
++ char *save;
++
++ pr_debug("parse_const: `%s' (signed: %d, pcrel: %d, align: %d)\n",
++ str, op->is_signed, op->is_pcrel, op->align_order);
++
++ save = input_line_pointer;
++ input_line_pointer = str;
++
++ expression(exp);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].align_order = op->align_order;
++ current_insn.pcrel = op->is_pcrel;
++
++ switch (exp->X_op)
++ {
++ case O_illegal:
++ as_bad(_("illegal operand"));
++ break;
++ case O_absent:
++ as_bad(_("missing operand"));
++ break;
++ case O_constant:
++ pr_debug(" -> constant: %ld\n", (long)exp->X_add_number);
++ current_insn.field_value[slot].value = exp->X_add_number;
++ break;
++ case O_uminus:
++ pr_debug(" -> uminus\n");
++ sym_exp = symbol_get_value_expression(exp->X_add_symbol);
++ switch (sym_exp->X_op) {
++ case O_subtract:
++ pr_debug(" -> subtract: switching operands\n");
++ exp->X_op_symbol = sym_exp->X_add_symbol;
++ exp->X_add_symbol = sym_exp->X_op_symbol;
++ exp->X_op = O_subtract;
++ /* TODO: Remove the old X_add_symbol */
++ break;
++ default:
++ as_bad(_("Expression too complex\n"));
++ break;
++ }
++ break;
++#if 0
++ case O_subtract:
++ /* Any expression subtracting a symbol from the current section
++ can be made PC-relative by adding the right offset. */
++ if (S_GET_SEGMENT(exp->X_op_symbol) == now_seg)
++ current_insn.pcrel = TRUE;
++ pr_debug(" -> subtract: pcrel? %s\n",
++ current_insn.pcrel ? "yes" : "no");
++ /* fall through */
++#endif
++ default:
++ pr_debug(" -> (%p <%d> %p + %d)\n",
++ exp->X_add_symbol, exp->X_op, exp->X_op_symbol,
++ exp->X_add_number);
++ current_insn.field_value[slot].value = 0;
++ break;
++ }
++
++ input_line_pointer = save;
++}
++
++static void
++parse_jmplabel(const struct avr32_operand *op, char *str,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ expressionS *exp = &current_insn.immediate;
++ int slot;
++ char *save;
++
++ pr_debug("parse_jmplabel: `%s' (signed: %d, pcrel: %d, align: %d)\n",
++ str, op->is_signed, op->is_pcrel, op->align_order);
++
++ save = input_line_pointer;
++ input_line_pointer = str;
++
++ expression(exp);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].align_order = op->align_order;
++ current_insn.pcrel = TRUE;
++
++ switch (exp->X_op)
++ {
++ case O_illegal:
++ as_bad(_("illegal operand"));
++ break;
++ case O_absent:
++ as_bad(_("missing operand"));
++ break;
++ case O_constant:
++ pr_debug(" -> constant: %ld\n", (long)exp->X_add_number);
++ current_insn.field_value[slot].value = exp->X_add_number;
++ current_insn.pcrel = 0;
++ break;
++ default:
++ pr_debug(" -> (%p <%d> %p + %d)\n",
++ exp->X_add_symbol, exp->X_op, exp->X_op_symbol,
++ exp->X_add_number);
++ current_insn.field_value[slot].value = 0;
++ break;
++ }
++
++ input_line_pointer = save;
++}
++
++static void
++parse_intreg(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ int regid, slot;
++
++ pr_debug("parse_intreg: `%s'\n", str);
++
++ regid = avr32_parse_intreg(str);
++ assert(regid >= 0);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++ current_insn.field_value[slot].align_order = op->align_order;
++}
++
++static void
++parse_intreg_predec(const struct avr32_operand *op, char *str, int opindex)
++{
++ parse_intreg(op, str + 2, opindex);
++}
++
++static void
++parse_intreg_postinc(const struct avr32_operand *op, char *str, int opindex)
++{
++ char *p, c;
++
++ pr_debug("parse_intreg_postinc: `%s'\n", str);
++
++ for (p = str; *p != '+'; p++) ;
++
++ c = *p, *p = 0;
++ parse_intreg(op, str, opindex);
++ *p = c;
++}
++
++static void
++parse_intreg_shift(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ int regid, slot, shift = 0;
++ char *p, c;
++ char shiftop;
++
++ pr_debug("parse Ry<<sa: `%s'\n", str);
++
++ for (p = str; *p; p++)
++ if (*p == '<' || *p == '>')
++ break;
++
++ shiftop = *p;
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ assert(regid >= 0);
++ *p = c;
++
++ if (c)
++ {
++ if (p[0] != shiftop || p[1] != shiftop)
++ as_bad(_("expected shift operator in `%s'"), p);
++ else
++ {
++ expressionS exp;
++ char *saved;
++
++ saved = input_line_pointer;
++ input_line_pointer = p + 2;
++ expression(&exp);
++ input_line_pointer = saved;
++
++ if (exp.X_op != O_constant)
++ as_bad(_("shift amount must be a numeric constant"));
++ else
++ shift = exp.X_add_number;
++ }
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = shift;
++}
++
++/* The match() function selected the right opcode, so it doesn't
++ matter which way we shift any more. */
++#define parse_intreg_lsl parse_intreg_shift
++#define parse_intreg_lsr parse_intreg_shift
++
++static void
++parse_intreg_part(const struct avr32_operand *op, char *str,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ static const char bparts[] = { 'b', 'l', 'u', 't' };
++ static const char hparts[] = { 'b', 't' };
++ unsigned int slot, sel;
++ int regid;
++ char *p, c;
++
++ pr_debug("parse reg:part `%s'\n", str);
++
++ for (p = str; *p; p++)
++ if (*p == ':')
++ break;
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ assert(regid >= 0);
++ *p = c;
++
++ assert(c == ':');
++
++ if (op->align_order)
++ {
++ for (sel = 0; sel < sizeof(hparts); sel++)
++ if (TOLOWER(p[1]) == hparts[sel])
++ break;
++
++ if (sel >= sizeof(hparts))
++ {
++ as_bad(_("invalid halfword selector `%c' (must be either b or t)"),
++ p[1]);
++ sel = 0;
++ }
++ }
++ else
++ {
++ for (sel = 0; sel < sizeof(bparts); sel++)
++ if (TOLOWER(p[1]) == bparts[sel])
++ break;
++
++ if (sel >= sizeof(bparts))
++ {
++ as_bad(_("invalid byte selector `%c' (must be one of b,l,u,t)"),
++ p[1]);
++ sel = 0;
++ }
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = sel;
++}
++
++/* This is the parser for "Rp[displacement]" expressions. In addition
++ to the "official" syntax, we accept a label as a replacement for
++ the register expression. This syntax implies Rp=PC and the
++ displacement is the pc-relative distance to the label. */
++static void
++parse_intreg_disp(const struct avr32_operand *op, char *str, int opindex)
++{
++ expressionS *exp = &current_insn.immediate;
++ int slot, regid;
++ char *save, *p, c;
++
++ pr_debug("parse_intreg_disp: `%s' (signed: %d, pcrel: %d, align: %d)\n",
++ str, op->is_signed, op->is_pcrel, op->align_order);
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ slot = current_insn.next_slot++;
++
++ /* First, check if we have a valid register either before '[' or as
++ the sole expression. If so, we use the Rp[disp] syntax. */
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ *p = c;
++
++ if (regid >= 0)
++ {
++ current_insn.field_value[slot].value = regid;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].align_order = op->align_order;
++
++ if (c == '[')
++ {
++ save = input_line_pointer;
++ input_line_pointer = p + 1;
++
++ expression(exp);
++
++ if (*input_line_pointer != ']')
++ as_bad(_("junk after displacement expression"));
++
++ input_line_pointer = save;
++
++ switch (exp->X_op)
++ {
++ case O_illegal:
++ as_bad(_("illegal displacement expression"));
++ break;
++ case O_absent:
++ as_bad(_("missing displacement expression"));
++ break;
++ case O_constant:
++ pr_debug(" -> constant: %ld\n", exp->X_add_number);
++ current_insn.field_value[slot].value = exp->X_add_number;
++ break;
++#if 0
++ case O_subtract:
++ if (S_GET_SEGMENT(exp->X_op_symbol) == now_seg)
++ current_insn.pcrel = TRUE;
++ pr_debug(" -> subtract: pcrel? %s\n",
++ current_insn.pcrel ? "yes" : "no");
++ /* fall through */
++#endif
++ default:
++ pr_debug(" -> (%p <%d> %p + %d)\n",
++ exp->X_add_symbol, exp->X_op, exp->X_op_symbol,
++ exp->X_add_number);
++ current_insn.field_value[slot].value = 0;
++ }
++ }
++ else
++ {
++ exp->X_op = O_constant;
++ exp->X_add_number = 0;
++ current_insn.field_value[slot].value = 0;
++ }
++ }
++ else
++ {
++ /* Didn't find a valid register. Try parsing it as a label. */
++ current_insn.field_value[slot].value = AVR32_REG_PC;
++ parse_jmplabel(op, str, opindex);
++ }
++}
++
++static void
++parse_intreg_index(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ int slot, regid;
++ char *p, *end, c;
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ assert(*p);
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ assert(regid >= 0);
++ *p = c;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++
++ p++;
++ for (end = p; *end; end++)
++ if (*end == ']' || *end == '<')
++ break;
++
++ assert(*end);
++
++ c = *end, *end = 0;
++ regid = avr32_parse_intreg(p);
++ assert(regid >= 0);
++ *end = c;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = 0;
++
++ if (*end == '<')
++ {
++ expressionS exp;
++ char *save;
++
++ p = end + 2;
++ for (end = p; *end; end++)
++ if (*end == ']')
++ break;
++
++ assert(*end == ']');
++
++ c = *end, *end = 0;
++ save = input_line_pointer;
++ input_line_pointer = p;
++ expression(&exp);
++
++ if (*input_line_pointer)
++ as_bad(_("junk after shift expression"));
++
++ *end = c;
++ input_line_pointer = save;
++
++ if (exp.X_op == O_constant)
++ current_insn.field_value[slot].value = exp.X_add_number;
++ else
++ as_bad(_("shift expression too complex"));
++ }
++}
++
++static void
++parse_intreg_xindex(const struct avr32_operand *op, char *str, int opindex)
++{
++ int slot, regid;
++ char *p, *end, c;
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ assert(*p);
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ assert(regid >= 0);
++ *p = c;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++
++ p++;
++ for (end = p; *end; end++)
++ if (*end == '<')
++ break;
++
++ assert(*end);
++
++ c = *end, *end = 0;
++ parse_intreg_part(op, p, opindex);
++ *end = c;
++}
++
++static void
++parse_pc_disp(const struct avr32_operand *op, char *str, int opindex)
++{
++ char *p, c;
++
++ for (p = str; *p; p++)
++ if (*p == '[')
++ break;
++
++ /* The lddpc instruction comes in two different syntax variants:
++ lddpc reg, expression
++ lddpc reg, pc[disp]
++ If the operand contains a '[', we use the second form. */
++ if (*p)
++ {
++ int regid;
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ *p = c;
++ if (regid == AVR32_REG_PC)
++ {
++ char *end;
++
++ for (end = ++p; *end; end++) ;
++ if (*(--end) != ']')
++ as_bad(_("unrecognized form of instruction: `%s'"), str);
++ else
++ {
++ c = *end, *end = 0;
++ parse_const(op, p, opindex);
++ *end = c;
++ current_insn.pcrel = 0;
++ }
++ }
++ else
++ as_bad(_("unrecognized form of instruction: `%s'"), str);
++ }
++ else
++ {
++ parse_jmplabel(op, str, opindex);
++ }
++}
++
++static void parse_sp(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str ATTRIBUTE_UNUSED,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ int slot;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = AVR32_REG_SP;
++}
++
++static void
++parse_sp_disp(const struct avr32_operand *op, char *str, int opindex)
++{
++ char *p, c;
++
++ for (; *str; str++)
++ if (*str == '[')
++ break;
++
++ assert(*str);
++
++ for (p = ++str; *p; p++)
++ if (*p == ']')
++ break;
++
++ c = *p, *p = 0;
++ parse_const(op, str, opindex);
++ *p = c;
++}
++
++static void
++parse_cpno(const struct avr32_operand *op ATTRIBUTE_UNUSED, char *str,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ int slot;
++
++ str += 2;
++ if (*str == '#')
++ str++;
++ if (*str < '0' || *str > '7' || str[1])
++ as_bad(_("invalid coprocessor `%s'"), str);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = *str - '0';
++}
++
++static void
++parse_cpreg(const struct avr32_operand *op, char *str,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned int crid;
++ int slot;
++ char *endptr;
++
++ str += 2;
++ crid = strtoul(str, &endptr, 10);
++ if (*endptr || crid > 15 || crid & ((1 << op->align_order) - 1))
++ as_bad(_("invalid coprocessor register `%s'"), str);
++
++ crid >>= op->align_order;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = crid;
++}
++
++static void
++parse_number(const struct avr32_operand *op, char *str,
++ int opindex ATTRIBUTE_UNUSED)
++{
++ expressionS exp;
++ int slot;
++ char *save;
++
++ save = input_line_pointer;
++ input_line_pointer = str;
++ expression(&exp);
++ input_line_pointer = save;
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].align_order = op->align_order;
++
++ if (exp.X_op == O_constant)
++ current_insn.field_value[slot].value = exp.X_add_number;
++ else
++ as_bad(_("invalid numeric expression `%s'"), str);
++}
++
++static void
++parse_reglist8(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask;
++ unsigned long value = 0;
++ int slot;
++ char *tail;
++
++ regmask = avr32_parse_reglist(str, &tail);
++ if (*tail)
++ as_bad(_("invalid register list `%s'"), str);
++ else
++ {
++ if (avr32_make_regmask8(regmask, &value))
++ as_bad(_("register list `%s' doesn't fit"), str);
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = value;
++}
++
++static int
++parse_reglist_tail(char *str, unsigned long regmask)
++{
++ expressionS exp;
++ char *save, *p, c;
++ int regid;
++
++ for (p = str + 1; *p; p++)
++ if (*p == '=')
++ break;
++
++ if (!*p)
++ {
++ as_bad(_("invalid register list `%s'"), str);
++ return -2;
++ }
++
++ c = *p, *p = 0;
++ regid = avr32_parse_intreg(str);
++ *p = c;
++
++ if (regid != 12)
++ {
++ as_bad(_("invalid register list `%s'"), str);
++ return -2;
++ }
++
++ /* If we have an assignment, we must pop PC and we must _not_
++ pop LR or R12 */
++ if (!(regmask & (1 << AVR32_REG_PC)))
++ {
++ as_bad(_("return value specified for non-return instruction"));
++ return -2;
++ }
++ else if (regmask & ((1 << AVR32_REG_R12) | (1 << AVR32_REG_LR)))
++ {
++ as_bad(_("can't pop LR or R12 when specifying return value"));
++ return -2;
++ }
++
++ save = input_line_pointer;
++ input_line_pointer = p + 1;
++ expression(&exp);
++ input_line_pointer = save;
++
++ if (exp.X_op != O_constant
++ || exp.X_add_number < -1
++ || exp.X_add_number > 1)
++ {
++ as_bad(_("invalid return value `%s'"), str);
++ return -2;
++ }
++
++ return exp.X_add_number;
++}
++
++static void
++parse_reglist9(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask;
++ unsigned long value = 0, kbit = 0;
++ int slot;
++ char *tail;
++
++ regmask = avr32_parse_reglist(str, &tail);
++ /* printf("parsed reglist16: %04lx, tail: `%s'\n", regmask, tail); */
++ if (*tail)
++ {
++ int retval;
++
++ retval = parse_reglist_tail(tail, regmask);
++
++ switch (retval)
++ {
++ case -1:
++ regmask |= 1 << AVR32_REG_LR;
++ break;
++ case 0:
++ break;
++ case 1:
++ regmask |= 1 << AVR32_REG_R12;
++ break;
++ default:
++ break;
++ }
++
++ kbit = 1;
++ }
++
++ if (avr32_make_regmask8(regmask, &value))
++ as_bad(_("register list `%s' doesn't fit"), str);
++
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = (value << 1) | kbit;
++}
++
++static void
++parse_reglist16(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask;
++ int slot;
++ char *tail;
++
++ regmask = avr32_parse_reglist(str, &tail);
++ if (*tail)
++ as_bad(_("invalid register list `%s'"), str);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regmask;
++}
++
++static void
++parse_reglist_ldm(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask;
++ int slot, rp, w_bit = 0;
++ char *tail, *p, c;
++
++ for (p = str; *p && *p != ','; p++)
++ if (*p == '+')
++ break;
++
++ c = *p, *p = 0;
++ rp = avr32_parse_intreg(str);
++ *p = c;
++ if (rp < 0)
++ {
++ as_bad(_("invalid destination register in `%s'"), str);
++ return;
++ }
++
++ if (p[0] == '+' && p[1] == '+')
++ {
++ w_bit = 1;
++ p += 2;
++ }
++
++ if (*p != ',')
++ {
++ as_bad(_("expected `,' after destination register in `%s'"), str);
++ return;
++ }
++
++ str = p + 1;
++ regmask = avr32_parse_reglist(str, &tail);
++ if (*tail)
++ {
++ int retval;
++
++ if (rp != AVR32_REG_SP)
++ {
++ as_bad(_("junk at end of line: `%s'"), tail);
++ return;
++ }
++
++ rp = AVR32_REG_PC;
++
++ retval = parse_reglist_tail(tail, regmask);
++
++ switch (retval)
++ {
++ case -1:
++ regmask |= 1 << AVR32_REG_LR;
++ break;
++ case 0:
++ break;
++ case 1:
++ regmask |= 1 << AVR32_REG_R12;
++ break;
++ default:
++ return;
++ }
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = rp;
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = w_bit;
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regmask;
++}
++
++static void
++parse_reglist_cp8(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask;
++ int slot, h_bit = 0;
++ char *tail;
++
++ regmask = avr32_parse_cpreglist(str, &tail);
++ if (*tail)
++ as_bad(_("junk at end of line: `%s'"), tail);
++ else if (regmask & 0xffUL)
++ {
++ if (regmask & 0xff00UL)
++ as_bad(_("register list `%s' doesn't fit"), str);
++ regmask &= 0xff;
++ }
++ else if (regmask & 0xff00UL)
++ {
++ regmask >>= 8;
++ h_bit = 1;
++ }
++ else
++ as_warn(_("register list is empty"));
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regmask;
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = h_bit;
++}
++
++static void
++parse_reglist_cpd8(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask, regmask_d = 0;
++ int slot, i;
++ char *tail;
++
++ regmask = avr32_parse_cpreglist(str, &tail);
++ if (*tail)
++ as_bad(_("junk at end of line: `%s'"), tail);
++
++ for (i = 0; i < 8; i++)
++ {
++ if (regmask & 1)
++ {
++ if (!(regmask & 2))
++ {
++ as_bad(_("register list `%s' doesn't fit"), str);
++ break;
++ }
++ regmask_d |= 1 << i;
++ }
++ else if (regmask & 2)
++ {
++ as_bad(_("register list `%s' doesn't fit"), str);
++ break;
++ }
++
++ regmask >>= 2;
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regmask_d;
++}
++
++static void
++parse_retval(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ int regid, slot;
++
++ regid = avr32_parse_intreg(str);
++ if (regid < 0)
++ {
++ expressionS exp;
++ char *save;
++
++ regid = 0;
++
++ save = input_line_pointer;
++ input_line_pointer = str;
++ expression(&exp);
++ input_line_pointer = save;
++
++ if (exp.X_op != O_constant)
++ as_bad(_("invalid return value `%s'"), str);
++ else
++ switch (exp.X_add_number)
++ {
++ case -1:
++ regid = AVR32_REG_LR;
++ break;
++ case 0:
++ regid = AVR32_REG_SP;
++ break;
++ case 1:
++ regid = AVR32_REG_PC;
++ break;
++ default:
++ as_bad(_("invalid return value `%s'"), str);
++ break;
++ }
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++}
++
++#define parse_mcall parse_intreg_disp
++
++static void
++parse_jospinc(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ expressionS exp;
++ int slot;
++ char *save;
++
++ save = input_line_pointer;
++ input_line_pointer = str;
++ expression(&exp);
++ input_line_pointer = save;
++
++ slot = current_insn.next_slot++;
++
++ if (exp.X_op == O_constant)
++ {
++ if (exp.X_add_number > 0)
++ exp.X_add_number--;
++ current_insn.field_value[slot].value = exp.X_add_number;
++ }
++ else
++ as_bad(_("invalid numeric expression `%s'"), str);
++}
++
++#define parse_coh parse_nothing
++#if 0
++static void
++parse_fpreg(const struct avr32_operand *op,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regid;
++ int slot;
++
++ regid = strtoul(str + 2, NULL, 10);
++
++ if ((regid >= 16) || (regid & ((1 << op->align_order) - 1)))
++ as_bad(_("invalid floating-point register `%s'"), str);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++ current_insn.field_value[slot].align_order = op->align_order;
++}
++#endif
++
++static void
++parse_picoreg(const struct avr32_operand *op,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regid;
++ int slot;
++
++ regid = avr32_parse_picoreg(str);
++ if (regid & ((1 << op->align_order) - 1))
++ as_bad(_("invalid double-word PiCo register `%s'"), str);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++ current_insn.field_value[slot].align_order = op->align_order;
++}
++
++static void
++parse_pico_reglist_w(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask;
++ int slot, h_bit = 0;
++ char *tail;
++
++ regmask = avr32_parse_pico_reglist(str, &tail);
++ if (*tail)
++ as_bad(_("junk at end of line: `%s'"), tail);
++
++ if (regmask & 0x00ffUL)
++ {
++ if (regmask & 0xff00UL)
++ as_bad(_("register list `%s' doesn't fit"), str);
++ regmask &= 0x00ffUL;
++ }
++ else if (regmask & 0xff00UL)
++ {
++ regmask >>= 8;
++ h_bit = 1;
++ }
++ else
++ as_warn(_("register list is empty"));
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regmask;
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = h_bit;
++}
++
++static void
++parse_pico_reglist_d(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regmask, regmask_d = 0;
++ int slot, i;
++ char *tail;
++
++ regmask = avr32_parse_pico_reglist(str, &tail);
++ if (*tail)
++ as_bad(_("junk at end of line: `%s'"), tail);
++
++ for (i = 0; i < 8; i++)
++ {
++ if (regmask & 1)
++ {
++ if (!(regmask & 2))
++ {
++ as_bad(_("register list `%s' doesn't fit"), str);
++ break;
++ }
++ regmask_d |= 1 << i;
++ }
++ else if (regmask & 2)
++ {
++ as_bad(_("register list `%s' doesn't fit"), str);
++ break;
++ }
++
++ regmask >>= 2;
++ }
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regmask_d;
++}
++
++static void
++parse_pico_in(const struct avr32_operand *op ATTRIBUTE_UNUSED,
++ char *str, int opindex ATTRIBUTE_UNUSED)
++{
++ unsigned long regid;
++ int slot;
++
++ regid = strtoul(str + 2, NULL, 10);
++
++ if (regid >= 12)
++ as_bad(_("invalid PiCo IN register `%s'"), str);
++
++ slot = current_insn.next_slot++;
++ current_insn.field_value[slot].value = regid;
++ current_insn.field_value[slot].align_order = 0;
++}
++
++#define parse_pico_out0 parse_nothing
++#define parse_pico_out1 parse_nothing
++#define parse_pico_out2 parse_nothing
++#define parse_pico_out3 parse_nothing
++
++#define OP(name, sgn, pcrel, align, func) \
++ { AVR32_OPERAND_##name, sgn, pcrel, align, match_##func, parse_##func }
++
++struct avr32_operand avr32_operand_table[] = {
++ OP(INTREG, 0, 0, 0, intreg),
++ OP(INTREG_PREDEC, 0, 0, 0, intreg_predec),
++ OP(INTREG_POSTINC, 0, 0, 0, intreg_postinc),
++ OP(INTREG_LSL, 0, 0, 0, intreg_lsl),
++ OP(INTREG_LSR, 0, 0, 0, intreg_lsr),
++ OP(INTREG_BSEL, 0, 0, 0, intreg_part),
++ OP(INTREG_HSEL, 0, 0, 1, intreg_part),
++ OP(INTREG_SDISP, 1, 0, 0, intreg_disp),
++ OP(INTREG_SDISP_H, 1, 0, 1, intreg_disp),
++ OP(INTREG_SDISP_W, 1, 0, 2, intreg_disp),
++ OP(INTREG_UDISP, 0, 0, 0, intreg_disp),
++ OP(INTREG_UDISP_H, 0, 0, 1, intreg_disp),
++ OP(INTREG_UDISP_W, 0, 0, 2, intreg_disp),
++ OP(INTREG_INDEX, 0, 0, 0, intreg_index),
++ OP(INTREG_XINDEX, 0, 0, 0, intreg_xindex),
++ OP(DWREG, 0, 0, 1, intreg),
++ OP(PC_UDISP_W, 0, 1, 2, pc_disp),
++ OP(SP, 0, 0, 0, sp),
++ OP(SP_UDISP_W, 0, 0, 2, sp_disp),
++ OP(CPNO, 0, 0, 0, cpno),
++ OP(CPREG, 0, 0, 0, cpreg),
++ OP(CPREG_D, 0, 0, 1, cpreg),
++ OP(UNSIGNED_CONST, 0, 0, 0, const),
++ OP(UNSIGNED_CONST_W, 0, 0, 2, const),
++ OP(SIGNED_CONST, 1, 0, 0, const),
++ OP(SIGNED_CONST_W, 1, 0, 2, const),
++ OP(JMPLABEL, 1, 1, 1, jmplabel),
++ OP(UNSIGNED_NUMBER, 0, 0, 0, number),
++ OP(UNSIGNED_NUMBER_W, 0, 0, 2, number),
++ OP(REGLIST8, 0, 0, 0, reglist8),
++ OP(REGLIST9, 0, 0, 0, reglist9),
++ OP(REGLIST16, 0, 0, 0, reglist16),
++ OP(REGLIST_LDM, 0, 0, 0, reglist_ldm),
++ OP(REGLIST_CP8, 0, 0, 0, reglist_cp8),
++ OP(REGLIST_CPD8, 0, 0, 0, reglist_cpd8),
++ OP(RETVAL, 0, 0, 0, retval),
++ OP(MCALL, 1, 0, 2, mcall),
++ OP(JOSPINC, 0, 0, 0, jospinc),
++ OP(COH, 0, 0, 0, coh),
++ OP(PICO_REG_W, 0, 0, 0, picoreg),
++ OP(PICO_REG_D, 0, 0, 1, picoreg),
++ OP(PICO_REGLIST_W, 0, 0, 0, pico_reglist_w),
++ OP(PICO_REGLIST_D, 0, 0, 0, pico_reglist_d),
++ OP(PICO_IN, 0, 0, 0, pico_in),
++ OP(PICO_OUT0, 0, 0, 0, pico_out0),
++ OP(PICO_OUT1, 0, 0, 0, pico_out1),
++ OP(PICO_OUT2, 0, 0, 0, pico_out2),
++ OP(PICO_OUT3, 0, 0, 0, pico_out3),
++};
++
++symbolS *
++md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
++{
++ pr_debug("md_undefined_symbol: %s\n", name);
++ return 0;
++}
++
++struct avr32_relax_type
++{
++ long lower_bound;
++ long upper_bound;
++ unsigned char align;
++ unsigned char length;
++ signed short next;
++};
++
++#define EMPTY { 0, 0, 0, 0, -1 }
++#define C(lower, upper, align, next) \
++ { (lower), (upper), (align), 2, AVR32_OPC_##next }
++#define E(lower, upper, align) \
++ { (lower), (upper), (align), 4, -1 }
++
++static const struct avr32_relax_type avr32_relax_table[] =
++ {
++ /* 0 */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY,
++ E(0, 65535, 0), E(0, 65535, 0), E(0, 65535, 0), E(0, 65535, 0),
++ EMPTY,
++ /* 16 */
++ EMPTY, EMPTY, EMPTY, EMPTY,
++
++ C(-256, 254, 1, BREQ2), C(-256, 254, 1, BRNE2),
++ C(-256, 254, 1, BRCC2), C(-256, 254, 1, BRCS2),
++ C(-256, 254, 1, BRGE2), C(-256, 254, 1, BRLT2),
++ C(-256, 254, 1, BRMI2), C(-256, 254, 1, BRPL2),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ /* 32 */
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++ E(-2097152, 2097150, 1), E(-2097152, 2097150, 1),
++
++ EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 48 */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY,
++
++ C(-32, 31, 0, CP_W3), E(-1048576, 1048575, 0),
++
++ EMPTY, EMPTY, EMPTY,
++ /* 64: csrfcz */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ E(0, 65535, 0), E(0, 65535, 0),
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ E(-32768, 32767, 0),
++ /* 80: LD_SB2 */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++
++ C(0, 7, 0, LD_UB4), E(-32768, 32767, 0),
++
++ EMPTY,
++ EMPTY, EMPTY,
++
++ C(0, 14, 1, LD_SH4), E(-32768, 32767, 0),
++
++ EMPTY, EMPTY, EMPTY,
++
++ C(0, 14, 1, LD_UH4),
++
++ /* 96: LD_UH4 */
++ E(-32768, 32767, 0),
++
++ EMPTY, EMPTY, EMPTY, EMPTY,
++
++ C(0, 124, 2, LD_W4), E(-32768, 32767, 0),
++
++ E(0, 1020, 2), /* LDC_D1 */
++ EMPTY, EMPTY,
++ E(0, 1020, 2), /* LDC_W1 */
++ EMPTY, EMPTY,
++ E(0, 16380, 2), /* LDC0_D */
++ E(0, 16380, 2), /* LDC0_W */
++ EMPTY,
++
++ /* 112: LDCM_D_PU */
++ EMPTY, EMPTY, EMPTY,
++
++ C(0, 508, 2, LDDPC_EXT), E(-32768, 32767, 0),
++
++ EMPTY,EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 134: MACHH_W */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ E(-131072, 131068, 2), /* MCALL */
++ E(0, 1020, 2), /* MFDR */
++ E(0, 1020, 2), /* MFSR */
++ EMPTY, EMPTY,
++
++ C(-128, 127, 0, MOV2), E(-1048576, 1048575, 0),
++
++ EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++
++ E(-128, 127, 0), /* MOVEQ2 */
++ E(-128, 127, 0), /* MOVNE2 */
++ E(-128, 127, 0), /* MOVCC2 */
++ E(-128, 127, 0), /* 166: MOVCS2 */
++ E(-128, 127, 0), /* MOVGE2 */
++ E(-128, 127, 0), /* MOVLT2 */
++ E(-128, 127, 0), /* MOVMI2 */
++ E(-128, 127, 0), /* MOVPL2 */
++ E(-128, 127, 0), /* MOVLS2 */
++ E(-128, 127, 0), /* MOVGT2 */
++ E(-128, 127, 0), /* MOVLE2 */
++ E(-128, 127, 0), /* MOVHI2 */
++ E(-128, 127, 0), /* MOVVS2 */
++ E(-128, 127, 0), /* MOVVC2 */
++ E(-128, 127, 0), /* MOVQS2 */
++ E(-128, 127, 0), /* MOVAL2 */
++
++ E(0, 1020, 2), /* MTDR */
++ E(0, 1020, 2), /* MTSR */
++ EMPTY,
++ EMPTY,
++ E(-128, 127, 0), /* MUL3 */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 198: MVCR_W */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ E(0, 65535, 0), E(0, 65535, 0),
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 230: PASR_H */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 262: PUNPCKSB_H */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++
++ C(-1024, 1022, 1, RCALL2), E(-2097152, 2097150, 1),
++
++ EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY,
++
++ C(-1024, 1022, 1, BRAL),
++
++ EMPTY, EMPTY, EMPTY,
++ E(-128, 127, 0), /* RSUB2 */
++ /* 294: SATADD_H */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ E(0, 255, 0), /* SLEEP */
++ EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 326: ST_B2 */
++ EMPTY, EMPTY,
++ C(0, 7, 0, ST_B4), E(-32768, 32767, 0),
++ EMPTY, EMPTY, EMPTY, EMPTY,
++ E(-32768, 32767, 0),
++ EMPTY, EMPTY, EMPTY,
++ C(0, 14, 1, ST_H4), E(-32768, 32767, 0),
++ EMPTY, EMPTY,
++ EMPTY,
++ C(0, 60, 2, ST_W4), E(-32768, 32767, 0),
++ E(0, 1020, 2), /* STC_D1 */
++ EMPTY, EMPTY,
++ E(0, 1020, 2), /* STC_W1 */
++ EMPTY, EMPTY,
++ E(0, 16380, 2), /* STC0_D */
++ E(0, 16380, 2), /* STC0_W */
++
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 358: STDSP */
++ EMPTY, EMPTY,
++ E(0, 1020, 2), /* STHH_W1 */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY,
++ E(-32768, 32767, 0),
++ C(-512, 508, 2, SUB4),
++ C(-128, 127, 0, SUB4), E(-1048576, 1048576, 0),
++ /* SUB{cond} */
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ /* SUBF{cond} */
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ EMPTY,
++
++ /* 406: SWAP_B */
++ EMPTY, EMPTY, EMPTY,
++ E(0, 255, 0), /* SYNC */
++ EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 414: TST */
++ EMPTY, EMPTY, E(-65536, 65535, 2), E(-65536, 65535, 2), E(-65536, 65535, 2), EMPTY, EMPTY, EMPTY,
++ /* 422: RSUB{cond} */
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0), E(-128, 127, 0),
++ /* 436: ADD{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 454: SUB{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 472: AND{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 486: OR{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 502: EOR{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 518: LD.w{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 534: LD.sh{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 550: LD.uh{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 566: LD.sb{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 582: LD.ub{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 596: ST.w{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 614: ST.h{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 630: ST.b{cond} */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ /* 646: movh */
++ E(0, 65535, 0), EMPTY, EMPTY,
++ /* 649: fmac.s */
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,
++ };
++
++#undef E
++#undef C
++#undef EMPTY
++
++#define AVR32_RS_NONE (-1)
++
++#define avr32_rs_size(state) (avr32_relax_table[(state)].length)
++#define avr32_rs_align(state) (avr32_relax_table[(state)].align)
++#define relax_more(state) (avr32_relax_table[(state)].next)
++
++#define opc_initial_substate(opc) ((opc)->id)
++
++static int need_relax(int subtype, offsetT distance)
++{
++ offsetT upper_bound, lower_bound;
++
++ upper_bound = avr32_relax_table[subtype].upper_bound;
++ lower_bound = avr32_relax_table[subtype].lower_bound;
++
++ if (distance & ((1 << avr32_rs_align(subtype)) - 1))
++ return 1;
++ if ((distance > upper_bound) || (distance < lower_bound))
++ return 1;
++
++ return 0;
++}
++
++enum {
++ LDA_SUBTYPE_MOV1,
++ LDA_SUBTYPE_MOV2,
++ LDA_SUBTYPE_SUB,
++ LDA_SUBTYPE_LDDPC,
++ LDA_SUBTYPE_LDW,
++ LDA_SUBTYPE_GOTLOAD,
++ LDA_SUBTYPE_GOTLOAD_LARGE,
++};
++
++enum {
++ CALL_SUBTYPE_RCALL1,
++ CALL_SUBTYPE_RCALL2,
++ CALL_SUBTYPE_MCALL_CP,
++ CALL_SUBTYPE_MCALL_GOT,
++ CALL_SUBTYPE_MCALL_LARGE,
++};
++
++#define LDA_INITIAL_SIZE (avr32_pic ? 4 : 2)
++#define CALL_INITIAL_SIZE 2
++
++#define need_reloc(sym, seg, pcrel) \
++ (!(S_IS_DEFINED(sym) \
++ && ((pcrel && S_GET_SEGMENT(sym) == seg) \
++ || (!pcrel && S_GET_SEGMENT(sym) == absolute_section))) \
++ || S_FORCE_RELOC(sym, 1))
++
++/* Return an initial guess of the length by which a fragment must grow to
++ hold a branch to reach its destination.
++ Also updates fr_type/fr_subtype as necessary.
++
++ Called just before doing relaxation.
++ Any symbol that is now undefined will not become defined.
++ The guess for fr_var is ACTUALLY the growth beyond fr_fix.
++ Whatever we do to grow fr_fix or fr_var contributes to our returned value.
++ Although it may not be explicit in the frag, pretend fr_var starts with a
++ 0 value. */
++
++static int
++avr32_default_estimate_size_before_relax (fragS *fragP, segT segment)
++{
++ int growth = 0;
++
++ assert(fragP);
++ assert(fragP->fr_symbol);
++
++ if (fragP->tc_frag_data.force_extended
++ || need_reloc(fragP->fr_symbol, segment, fragP->tc_frag_data.pcrel))
++ {
++ int largest_state = fragP->fr_subtype;
++ while (relax_more(largest_state) != AVR32_RS_NONE)
++ largest_state = relax_more(largest_state);
++ growth = avr32_rs_size(largest_state) - fragP->fr_var;
++ }
++ else
++ {
++ growth = avr32_rs_size(fragP->fr_subtype) - fragP->fr_var;
++ }
++
++ pr_debug("%s:%d: md_estimate_size_before_relax: %d\n",
++ fragP->fr_file, fragP->fr_line, growth);
++
++ return growth;
++}
++
++static int
++avr32_lda_estimate_size_before_relax(fragS *fragP, segT segment ATTRIBUTE_UNUSED)
++{
++ return fragP->fr_var - LDA_INITIAL_SIZE;
++}
++
++static int
++avr32_call_estimate_size_before_relax(fragS *fragP, segT segment ATTRIBUTE_UNUSED)
++{
++ return fragP->fr_var - CALL_INITIAL_SIZE;
++}
++
++static int
++avr32_cpool_estimate_size_before_relax(fragS *fragP,
++ segT segment ATTRIBUTE_UNUSED)
++{
++ return fragP->fr_var;
++}
++
++/* This macro may be defined to relax a frag. GAS will call this with the
++ * segment, the frag, and the change in size of all previous frags;
++ * md_relax_frag should return the change in size of the frag. */
++static long
++avr32_default_relax_frag (segT segment, fragS *fragP, long stretch)
++{
++ int state, next_state;
++ symbolS *symbolP; /* The target symbol */
++ long growth = 0;
++
++ state = next_state = fragP->fr_subtype;
++
++ symbolP = fragP->fr_symbol;
++
++ if (fragP->tc_frag_data.force_extended
++ || need_reloc(symbolP, segment, fragP->tc_frag_data.pcrel))
++ {
++ /* Symbol must be resolved by the linker. Emit the largest
++ possible opcode. */
++ while (relax_more(next_state) != AVR32_RS_NONE)
++ next_state = relax_more(next_state);
++ }
++ else
++ {
++ addressT address; /* The address of fragP */
++ addressT target; /* The address of the target symbol */
++ offsetT distance; /* The distance between the insn and the symbol */
++ fragS *sym_frag;
++
++ address = fragP->fr_address;
++ target = fragP->fr_offset;
++ symbolP = fragP->fr_symbol;
++ sym_frag = symbol_get_frag(symbolP);
++
++ address += fragP->fr_fix - fragP->fr_var;
++ target += S_GET_VALUE(symbolP);
++
++ if (stretch != 0
++ && sym_frag->relax_marker != fragP->relax_marker
++ && S_GET_SEGMENT(symbolP) == segment)
++ /* if it was correctly aligned before, make sure it stays aligned */
++ target += stretch & (~0UL << avr32_rs_align(state));
++
++ if (fragP->tc_frag_data.pcrel)
++ distance = target - (address & (~0UL << avr32_rs_align(state)));
++ else
++ distance = target;
++
++ pr_debug("%s:%d: relax more? 0x%x - 0x%x = 0x%x (%d), align %d\n",
++ fragP->fr_file, fragP->fr_line, target, address,
++ distance, distance, avr32_rs_align(state));
++
++ if (need_relax(state, distance))
++ {
++ if (relax_more(state) != AVR32_RS_NONE)
++ next_state = relax_more(state);
++ pr_debug("%s:%d: relax more %d -> %d (%d - %d, align %d)\n",
++ fragP->fr_file, fragP->fr_line, state, next_state,
++ target, address, avr32_rs_align(state));
++ }
++ }
++
++ growth = avr32_rs_size(next_state) - avr32_rs_size(state);
++ fragP->fr_subtype = next_state;
++
++ pr_debug("%s:%d: md_relax_frag: growth=%d, subtype=%d, opc=0x%08lx\n",
++ fragP->fr_file, fragP->fr_line, growth, fragP->fr_subtype,
++ avr32_opc_table[next_state].value);
++
++ return growth;
++}
++
++static long
++avr32_lda_relax_frag(segT segment, fragS *fragP, long stretch)
++{
++ struct cpool *pool= NULL;
++ unsigned int entry = 0;
++ addressT address, target;
++ offsetT distance;
++ symbolS *symbolP;
++ fragS *sym_frag;
++ long old_size, new_size;
++
++ symbolP = fragP->fr_symbol;
++ old_size = fragP->fr_var;
++ if (!avr32_pic)
++ {
++ pool = fragP->tc_frag_data.pool;
++ entry = fragP->tc_frag_data.pool_entry;
++ }
++
++ address = fragP->fr_address;
++ address += fragP->fr_fix - LDA_INITIAL_SIZE;
++
++ if (!S_IS_DEFINED(symbolP) || S_FORCE_RELOC(symbolP, 1))
++ goto relax_max;
++
++ target = fragP->fr_offset;
++ sym_frag = symbol_get_frag(symbolP);
++ target += S_GET_VALUE(symbolP);
++
++ if (sym_frag->relax_marker != fragP->relax_marker
++ && S_GET_SEGMENT(symbolP) == segment)
++ target += stretch;
++
++ distance = target - address;
++
++ pr_debug("lda_relax_frag: target: %d, address: %d, var: %d\n",
++ target, address, fragP->fr_var);
++
++ if (!avr32_pic && S_GET_SEGMENT(symbolP) == absolute_section
++ && target <= 127 && (offsetT)target >= -128)
++ {
++ if (fragP->fr_subtype == LDA_SUBTYPE_LDDPC
++ || fragP->fr_subtype == LDA_SUBTYPE_LDW)
++ pool->literals[entry].refcount--;
++ new_size = 2;
++ fragP->fr_subtype = LDA_SUBTYPE_MOV1;
++ }
++ else if (!avr32_pic && S_GET_SEGMENT(symbolP) == absolute_section
++ && target <= 1048575 && (offsetT)target >= -1048576)
++ {
++ if (fragP->fr_subtype == LDA_SUBTYPE_LDDPC
++ || fragP->fr_subtype == LDA_SUBTYPE_LDW)
++ pool->literals[entry].refcount--;
++ new_size = 4;
++ fragP->fr_subtype = LDA_SUBTYPE_MOV2;
++ }
++ else if (!linkrelax && S_GET_SEGMENT(symbolP) == segment
++ /* the field will be negated, so this is really -(-32768)
++ and -(32767) */
++ && distance <= 32768 && distance >= -32767)
++ {
++ if (!avr32_pic
++ && (fragP->fr_subtype == LDA_SUBTYPE_LDDPC
++ || fragP->fr_subtype == LDA_SUBTYPE_LDW))
++ pool->literals[entry].refcount--;
++ new_size = 4;
++ fragP->fr_subtype = LDA_SUBTYPE_SUB;
++ }
++ else
++ {
++ relax_max:
++ if (avr32_pic)
++ {
++ if (linkrelax)
++ {
++ new_size = 8;
++ fragP->fr_subtype = LDA_SUBTYPE_GOTLOAD_LARGE;
++ }
++ else
++ {
++ new_size = 4;
++ fragP->fr_subtype = LDA_SUBTYPE_GOTLOAD;
++ }
++ }
++ else
++ {
++ if (fragP->fr_subtype != LDA_SUBTYPE_LDDPC
++ && fragP->fr_subtype != LDA_SUBTYPE_LDW)
++ pool->literals[entry].refcount++;
++
++ sym_frag = symbol_get_frag(pool->symbol);
++ target = (sym_frag->fr_address + sym_frag->fr_fix
++ + pool->padding + pool->literals[entry].offset);
++
++ pr_debug("cpool sym address: 0x%lx\n",
++ sym_frag->fr_address + sym_frag->fr_fix);
++
++ know(pool->section == segment);
++
++ if (sym_frag->relax_marker != fragP->relax_marker)
++ target += stretch;
++
++ distance = target - address;
++ if (distance <= 508 && distance >= 0)
++ {
++ new_size = 2;
++ fragP->fr_subtype = LDA_SUBTYPE_LDDPC;
++ }
++ else
++ {
++ new_size = 4;
++ fragP->fr_subtype = LDA_SUBTYPE_LDW;
++ }
++
++ pr_debug("lda_relax_frag (cpool): target=0x%lx, address=0x%lx, refcount=%d\n",
++ target, address, pool->literals[entry].refcount);
++ }
++ }
++
++ fragP->fr_var = new_size;
++
++ pr_debug("%s:%d: lda: relax pass done. subtype: %d, growth: %ld\n",
++ fragP->fr_file, fragP->fr_line,
++ fragP->fr_subtype, new_size - old_size);
++
++ return new_size - old_size;
++}
++
++static long
++avr32_call_relax_frag(segT segment, fragS *fragP, long stretch)
++{
++ struct cpool *pool = NULL;
++ unsigned int entry = 0;
++ addressT address, target;
++ offsetT distance;
++ symbolS *symbolP;
++ fragS *sym_frag;
++ long old_size, new_size;
++
++ symbolP = fragP->fr_symbol;
++ old_size = fragP->fr_var;
++ if (!avr32_pic)
++ {
++ pool = fragP->tc_frag_data.pool;
++ entry = fragP->tc_frag_data.pool_entry;
++ }
++
++ address = fragP->fr_address;
++ address += fragP->fr_fix - CALL_INITIAL_SIZE;
++
++ if (need_reloc(symbolP, segment, 1))
++ {
++ pr_debug("call: must emit reloc\n");
++ goto relax_max;
++ }
++
++ target = fragP->fr_offset;
++ sym_frag = symbol_get_frag(symbolP);
++ target += S_GET_VALUE(symbolP);
++
++ if (sym_frag->relax_marker != fragP->relax_marker
++ && S_GET_SEGMENT(symbolP) == segment)
++ target += stretch;
++
++ distance = target - address;
++
++ if (distance <= 1022 && distance >= -1024)
++ {
++ pr_debug("call: distance is %d, emitting short rcall\n", distance);
++ if (!avr32_pic && fragP->fr_subtype == CALL_SUBTYPE_MCALL_CP)
++ pool->literals[entry].refcount--;
++ new_size = 2;
++ fragP->fr_subtype = CALL_SUBTYPE_RCALL1;
++ }
++ else if (distance <= 2097150 && distance >= -2097152)
++ {
++ pr_debug("call: distance is %d, emitting long rcall\n", distance);
++ if (!avr32_pic && fragP->fr_subtype == CALL_SUBTYPE_MCALL_CP)
++ pool->literals[entry].refcount--;
++ new_size = 4;
++ fragP->fr_subtype = CALL_SUBTYPE_RCALL2;
++ }
++ else
++ {
++ pr_debug("call: distance %d too far, emitting something big\n", distance);
++
++ relax_max:
++ if (avr32_pic)
++ {
++ if (linkrelax)
++ {
++ new_size = 10;
++ fragP->fr_subtype = CALL_SUBTYPE_MCALL_LARGE;
++ }
++ else
++ {
++ new_size = 4;
++ fragP->fr_subtype = CALL_SUBTYPE_MCALL_GOT;
++ }
++ }
++ else
++ {
++ if (fragP->fr_subtype != CALL_SUBTYPE_MCALL_CP)
++ pool->literals[entry].refcount++;
++
++ new_size = 4;
++ fragP->fr_subtype = CALL_SUBTYPE_MCALL_CP;
++ }
++ }
++
++ fragP->fr_var = new_size;
++
++ pr_debug("%s:%d: call: relax pass done, growth: %d, fr_var: %d\n",
++ fragP->fr_file, fragP->fr_line,
++ new_size - old_size, fragP->fr_var);
++
++ return new_size - old_size;
++}
++
++static long
++avr32_cpool_relax_frag(segT segment ATTRIBUTE_UNUSED,
++ fragS *fragP,
++ long stretch ATTRIBUTE_UNUSED)
++{
++ struct cpool *pool;
++ addressT address;
++ long old_size, new_size;
++ unsigned int entry;
++
++ pool = fragP->tc_frag_data.pool;
++ address = fragP->fr_address + fragP->fr_fix;
++ old_size = fragP->fr_var;
++ new_size = 0;
++
++ for (entry = 0; entry < pool->next_free_entry; entry++)
++ {
++ if (pool->literals[entry].refcount > 0)
++ {
++ pool->literals[entry].offset = new_size;
++ new_size += 4;
++ }
++ }
++
++ fragP->fr_var = new_size;
++
++ return new_size - old_size;
++}
++
++/* *fragP has been relaxed to its final size, and now needs to have
++ the bytes inside it modified to conform to the new size.
++
++ Called after relaxation is finished.
++ fragP->fr_type == rs_machine_dependent.
++ fragP->fr_subtype is the subtype of what the address relaxed to. */
++
++static void
++avr32_default_convert_frag (bfd *abfd ATTRIBUTE_UNUSED,
++ segT segment ATTRIBUTE_UNUSED,
++ fragS *fragP)
++{
++ const struct avr32_opcode *opc;
++ const struct avr32_ifield *ifield;
++ bfd_reloc_code_real_type r_type;
++ symbolS *symbolP;
++ fixS *fixP;
++ bfd_vma value;
++ int subtype;
++
++ opc = &avr32_opc_table[fragP->fr_subtype];
++ ifield = opc->fields[opc->var_field];
++ symbolP = fragP->fr_symbol;
++ subtype = fragP->fr_subtype;
++ r_type = opc->reloc_type;
++
++ /* Clear the opcode bits and the bits belonging to the relaxed
++ field. We assume all other fields stay the same. */
++ value = bfd_getb32(fragP->fr_opcode);
++ value &= ~(opc->mask | ifield->mask);
++
++ /* Insert the new opcode */
++ value |= opc->value;
++ bfd_putb32(value, fragP->fr_opcode);
++
++ fragP->fr_fix += opc->size - fragP->fr_var;
++
++ if (fragP->tc_frag_data.reloc_info != AVR32_OPINFO_NONE)
++ {
++ switch (fragP->tc_frag_data.reloc_info)
++ {
++ case AVR32_OPINFO_HI:
++ r_type = BFD_RELOC_HI16;
++ break;
++ case AVR32_OPINFO_LO:
++ r_type = BFD_RELOC_LO16;
++ break;
++ case AVR32_OPINFO_GOT:
++ switch (r_type)
++ {
++ case BFD_RELOC_AVR32_18W_PCREL:
++ r_type = BFD_RELOC_AVR32_GOT18SW;
++ break;
++ case BFD_RELOC_AVR32_16S:
++ r_type = BFD_RELOC_AVR32_GOT16S;
++ break;
++ default:
++ BAD_CASE(r_type);
++ break;
++ }
++ break;
++ default:
++ BAD_CASE(fragP->tc_frag_data.reloc_info);
++ break;
++ }
++ }
++
++ pr_debug("%s:%d: convert_frag: new %s fixup\n",
++ fragP->fr_file, fragP->fr_line,
++ bfd_get_reloc_code_name(r_type));
++
++#if 1
++ fixP = fix_new_exp(fragP, fragP->fr_fix - opc->size, opc->size,
++ &fragP->tc_frag_data.exp,
++ fragP->tc_frag_data.pcrel, r_type);
++#else
++ fixP = fix_new(fragP, fragP->fr_fix - opc->size, opc->size, symbolP,
++ fragP->fr_offset, fragP->tc_frag_data.pcrel, r_type);
++#endif
++
++ /* Revert fix_new brain damage. "dot_value" is the value of PC at
++ the point of the fixup, relative to the frag address. fix_new()
++ and friends think they are only being called during the assembly
++ pass, not during relaxation or similar, so fx_dot_value, fx_file
++ and fx_line are all initialized to the wrong value. But we don't
++ know the size of the fixup until now, so we really can't live up
++ to the assumptions these functions make about the target. What
++ do these functions think the "where" and "frag" argument mean
++ anyway? */
++ fixP->fx_dot_value = fragP->fr_fix - opc->size;
++ fixP->fx_file = fragP->fr_file;
++ fixP->fx_line = fragP->fr_line;
++
++ fixP->tc_fix_data.ifield = ifield;
++ fixP->tc_fix_data.align = avr32_rs_align(subtype);
++ fixP->tc_fix_data.min = avr32_relax_table[subtype].lower_bound;
++ fixP->tc_fix_data.max = avr32_relax_table[subtype].upper_bound;
++}
++
++static void
++avr32_lda_convert_frag(bfd *abfd ATTRIBUTE_UNUSED,
++ segT segment ATTRIBUTE_UNUSED,
++ fragS *fragP)
++{
++ const struct avr32_opcode *opc;
++ const struct avr32_ifield *ifield;
++ bfd_reloc_code_real_type r_type;
++ expressionS exp;
++ struct cpool *pool;
++ fixS *fixP;
++ bfd_vma value;
++ int regid, pcrel = 0, align = 0;
++ char *p;
++
++ r_type = BFD_RELOC_NONE;
++ regid = fragP->tc_frag_data.reloc_info;
++ p = fragP->fr_opcode;
++ exp.X_add_symbol = fragP->fr_symbol;
++ exp.X_add_number = fragP->fr_offset;
++ exp.X_op = O_symbol;
++
++ pr_debug("%s:%d: lda_convert_frag, subtype: %d, fix: %d, var: %d, regid: %d\n",
++ fragP->fr_file, fragP->fr_line,
++ fragP->fr_subtype, fragP->fr_fix, fragP->fr_var, regid);
++
++ switch (fragP->fr_subtype)
++ {
++ case LDA_SUBTYPE_MOV1:
++ opc = &avr32_opc_table[AVR32_OPC_MOV1];
++ opc->fields[0]->insert(opc->fields[0], p, regid);
++ ifield = opc->fields[1];
++ r_type = opc->reloc_type;
++ break;
++ case LDA_SUBTYPE_MOV2:
++ opc = &avr32_opc_table[AVR32_OPC_MOV2];
++ opc->fields[0]->insert(opc->fields[0], p, regid);
++ ifield = opc->fields[1];
++ r_type = opc->reloc_type;
++ break;
++ case LDA_SUBTYPE_SUB:
++ opc = &avr32_opc_table[AVR32_OPC_SUB5];
++ opc->fields[0]->insert(opc->fields[0], p, regid);
++ opc->fields[1]->insert(opc->fields[1], p, AVR32_REG_PC);
++ ifield = opc->fields[2];
++ r_type = BFD_RELOC_AVR32_16N_PCREL;
++
++ /* Pretend that SUB5 isn't a "negated" pcrel expression for now.
++ We'll have to fix it up later when we know whether to
++ generate a reloc for it (in which case the linker will negate
++ it, so we shouldn't). */
++ pcrel = 1;
++ break;
++ case LDA_SUBTYPE_LDDPC:
++ opc = &avr32_opc_table[AVR32_OPC_LDDPC];
++ align = 2;
++ r_type = BFD_RELOC_AVR32_9W_CP;
++ goto cpool_common;
++ case LDA_SUBTYPE_LDW:
++ opc = &avr32_opc_table[AVR32_OPC_LDDPC_EXT];
++ r_type = BFD_RELOC_AVR32_16_CP;
++ cpool_common:
++ opc->fields[0]->insert(opc->fields[0], p, regid);
++ ifield = opc->fields[1];
++ pool = fragP->tc_frag_data.pool;
++ exp.X_add_symbol = pool->symbol;
++ exp.X_add_number = pool->literals[fragP->tc_frag_data.pool_entry].offset;
++ pcrel = 1;
++ break;
++ case LDA_SUBTYPE_GOTLOAD_LARGE:
++ /* ld.w Rd, r6[Rd << 2] (last) */
++ opc = &avr32_opc_table[AVR32_OPC_LD_W5];
++ bfd_putb32(opc->value, p + 4);
++ opc->fields[0]->insert(opc->fields[0], p + 4, regid);
++ opc->fields[1]->insert(opc->fields[1], p + 4, 6);
++ opc->fields[2]->insert(opc->fields[2], p + 4, regid);
++ opc->fields[3]->insert(opc->fields[3], p + 4, 2);
++
++ /* mov Rd, (got_offset / 4) */
++ opc = &avr32_opc_table[AVR32_OPC_MOV2];
++ opc->fields[0]->insert(opc->fields[0], p, regid);
++ ifield = opc->fields[1];
++ r_type = BFD_RELOC_AVR32_LDA_GOT;
++ break;
++ case LDA_SUBTYPE_GOTLOAD:
++ opc = &avr32_opc_table[AVR32_OPC_LD_W4];
++ opc->fields[0]->insert(opc->fields[0], p, regid);
++ opc->fields[1]->insert(opc->fields[1], p, 6);
++ ifield = opc->fields[2];
++ if (r_type == BFD_RELOC_NONE)
++ r_type = BFD_RELOC_AVR32_GOT16S;
++ break;
++ default:
++ BAD_CASE(fragP->fr_subtype);
++ }
++
++ value = bfd_getb32(p);
++ value &= ~(opc->mask | ifield->mask);
++ value |= opc->value;
++ bfd_putb32(value, p);
++
++ fragP->fr_fix += fragP->fr_var - LDA_INITIAL_SIZE;
++
++ if (fragP->fr_next
++ && ((offsetT)(fragP->fr_next->fr_address - fragP->fr_address)
++ != fragP->fr_fix))
++ {
++ fprintf(stderr, "LDA frag: fr_fix is wrong! fragP->fr_var = %ld, r_type = %s\n",
++ fragP->fr_var, bfd_get_reloc_code_name(r_type));
++ abort();
++ }
++
++ fixP = fix_new_exp(fragP, fragP->fr_fix - fragP->fr_var, fragP->fr_var,
++ &exp, pcrel, r_type);
++
++ /* Revert fix_new brain damage. "dot_value" is the value of PC at
++ the point of the fixup, relative to the frag address. fix_new()
++ and friends think they are only being called during the assembly
++ pass, not during relaxation or similar, so fx_dot_value, fx_file
++ and fx_line are all initialized to the wrong value. But we don't
++ know the size of the fixup until now, so we really can't live up
++ to the assumptions these functions make about the target. What
++ do these functions think the "where" and "frag" argument mean
++ anyway? */
++ fixP->fx_dot_value = fragP->fr_fix - opc->size;
++ fixP->fx_file = fragP->fr_file;
++ fixP->fx_line = fragP->fr_line;
++
++ fixP->tc_fix_data.ifield = ifield;
++ fixP->tc_fix_data.align = align;
++ /* these are only used if the fixup can actually be resolved */
++ fixP->tc_fix_data.min = -32768;
++ fixP->tc_fix_data.max = 32767;
++}
++
++static void
++avr32_call_convert_frag(bfd *abfd ATTRIBUTE_UNUSED,
++ segT segment ATTRIBUTE_UNUSED,
++ fragS *fragP)
++{
++ const struct avr32_opcode *opc = NULL;
++ const struct avr32_ifield *ifield;
++ bfd_reloc_code_real_type r_type;
++ symbolS *symbol;
++ offsetT offset;
++ fixS *fixP;
++ bfd_vma value;
++ int pcrel = 0, align = 0;
++ char *p;
++
++ symbol = fragP->fr_symbol;
++ offset = fragP->fr_offset;
++ r_type = BFD_RELOC_NONE;
++ p = fragP->fr_opcode;
++
++ pr_debug("%s:%d: call_convert_frag, subtype: %d, fix: %d, var: %d\n",
++ fragP->fr_file, fragP->fr_line,
++ fragP->fr_subtype, fragP->fr_fix, fragP->fr_var);
++
++ switch (fragP->fr_subtype)
++ {
++ case CALL_SUBTYPE_RCALL1:
++ opc = &avr32_opc_table[AVR32_OPC_RCALL1];
++ /* fall through */
++ case CALL_SUBTYPE_RCALL2:
++ if (!opc)
++ opc = &avr32_opc_table[AVR32_OPC_RCALL2];
++ ifield = opc->fields[0];
++ r_type = opc->reloc_type;
++ pcrel = 1;
++ align = 1;
++ break;
++ case CALL_SUBTYPE_MCALL_CP:
++ opc = &avr32_opc_table[AVR32_OPC_MCALL];
++ opc->fields[0]->insert(opc->fields[0], p, AVR32_REG_PC);
++ ifield = opc->fields[1];
++ r_type = BFD_RELOC_AVR32_CPCALL;
++ symbol = fragP->tc_frag_data.pool->symbol;
++ offset = fragP->tc_frag_data.pool->literals[fragP->tc_frag_data.pool_entry].offset;
++ assert(fragP->tc_frag_data.pool->literals[fragP->tc_frag_data.pool_entry].refcount > 0);
++ pcrel = 1;
++ align = 2;
++ break;
++ case CALL_SUBTYPE_MCALL_GOT:
++ opc = &avr32_opc_table[AVR32_OPC_MCALL];
++ opc->fields[0]->insert(opc->fields[0], p, 6);
++ ifield = opc->fields[1];
++ r_type = BFD_RELOC_AVR32_GOT18SW;
++ break;
++ case CALL_SUBTYPE_MCALL_LARGE:
++ assert(fragP->fr_var == 10);
++ /* ld.w lr, r6[lr << 2] */
++ opc = &avr32_opc_table[AVR32_OPC_LD_W5];
++ bfd_putb32(opc->value, p + 4);
++ opc->fields[0]->insert(opc->fields[0], p + 4, AVR32_REG_LR);
++ opc->fields[1]->insert(opc->fields[1], p + 4, 6);
++ opc->fields[2]->insert(opc->fields[2], p + 4, AVR32_REG_LR);
++ opc->fields[3]->insert(opc->fields[3], p + 4, 2);
++
++ /* icall lr */
++ opc = &avr32_opc_table[AVR32_OPC_ICALL];
++ bfd_putb16(opc->value >> 16, p + 8);
++ opc->fields[0]->insert(opc->fields[0], p + 8, AVR32_REG_LR);
++
++ /* mov lr, (got_offset / 4) */
++ opc = &avr32_opc_table[AVR32_OPC_MOV2];
++ opc->fields[0]->insert(opc->fields[0], p, AVR32_REG_LR);
++ ifield = opc->fields[1];
++ r_type = BFD_RELOC_AVR32_GOTCALL;
++ break;
++ default:
++ BAD_CASE(fragP->fr_subtype);
++ }
++
++ /* Insert the opcode and clear the variable ifield */
++ value = bfd_getb32(p);
++ value &= ~(opc->mask | ifield->mask);
++ value |= opc->value;
++ bfd_putb32(value, p);
++
++ fragP->fr_fix += fragP->fr_var - CALL_INITIAL_SIZE;
++
++ if (fragP->fr_next
++ && ((offsetT)(fragP->fr_next->fr_address - fragP->fr_address)
++ != fragP->fr_fix))
++ {
++ fprintf(stderr, "%s:%d: fr_fix %lu is wrong! fr_var=%lu, r_type=%s\n",
++ fragP->fr_file, fragP->fr_line,
++ fragP->fr_fix, fragP->fr_var, bfd_get_reloc_code_name(r_type));
++ fprintf(stderr, "fr_fix should be %ld. next frag is %s:%d\n",
++ (offsetT)(fragP->fr_next->fr_address - fragP->fr_address),
++ fragP->fr_next->fr_file, fragP->fr_next->fr_line);
++ }
++
++ fixP = fix_new(fragP, fragP->fr_fix - fragP->fr_var, fragP->fr_var,
++ symbol, offset, pcrel, r_type);
++
++ /* Revert fix_new brain damage. "dot_value" is the value of PC at
++ the point of the fixup, relative to the frag address. fix_new()
++ and friends think they are only being called during the assembly
++ pass, not during relaxation or similar, so fx_dot_value, fx_file
++ and fx_line are all initialized to the wrong value. But we don't
++ know the size of the fixup until now, so we really can't live up
++ to the assumptions these functions make about the target. What
++ do these functions think the "where" and "frag" argument mean
++ anyway? */
++ fixP->fx_dot_value = fragP->fr_fix - opc->size;
++ fixP->fx_file = fragP->fr_file;
++ fixP->fx_line = fragP->fr_line;
++
++ fixP->tc_fix_data.ifield = ifield;
++ fixP->tc_fix_data.align = align;
++ /* these are only used if the fixup can actually be resolved */
++ fixP->tc_fix_data.min = -2097152;
++ fixP->tc_fix_data.max = 2097150;
++}
++
++static void
++avr32_cpool_convert_frag(bfd *abfd ATTRIBUTE_UNUSED,
++ segT segment ATTRIBUTE_UNUSED,
++ fragS *fragP)
++{
++ struct cpool *pool;
++ addressT address;
++ unsigned int entry;
++ char *p;
++ char sym_name[20];
++
++ /* Did we get rid of the frag altogether? */
++ if (!fragP->fr_var)
++ return;
++
++ pool = fragP->tc_frag_data.pool;
++ address = fragP->fr_address + fragP->fr_fix;
++ p = fragP->fr_literal + fragP->fr_fix;
++
++ sprintf(sym_name, "$$cp_\002%x", pool->id);
++ symbol_locate(pool->symbol, sym_name, pool->section, fragP->fr_fix, fragP);
++ symbol_table_insert(pool->symbol);
++
++ for (entry = 0; entry < pool->next_free_entry; entry++)
++ {
++ if (pool->literals[entry].refcount > 0)
++ {
++ fix_new_exp(fragP, fragP->fr_fix, 4, &pool->literals[entry].exp,
++ FALSE, BFD_RELOC_AVR32_32_CPENT);
++ fragP->fr_fix += 4;
++ }
++ }
++}
++
++static struct avr32_relaxer avr32_default_relaxer = {
++ .estimate_size = avr32_default_estimate_size_before_relax,
++ .relax_frag = avr32_default_relax_frag,
++ .convert_frag = avr32_default_convert_frag,
++};
++static struct avr32_relaxer avr32_lda_relaxer = {
++ .estimate_size = avr32_lda_estimate_size_before_relax,
++ .relax_frag = avr32_lda_relax_frag,
++ .convert_frag = avr32_lda_convert_frag,
++};
++static struct avr32_relaxer avr32_call_relaxer = {
++ .estimate_size = avr32_call_estimate_size_before_relax,
++ .relax_frag = avr32_call_relax_frag,
++ .convert_frag = avr32_call_convert_frag,
++};
++static struct avr32_relaxer avr32_cpool_relaxer = {
++ .estimate_size = avr32_cpool_estimate_size_before_relax,
++ .relax_frag = avr32_cpool_relax_frag,
++ .convert_frag = avr32_cpool_convert_frag,
++};
++
++static void s_cpool(int arg ATTRIBUTE_UNUSED)
++{
++ struct cpool *pool;
++ unsigned int max_size;
++ char *buf;
++
++ pool = find_cpool(now_seg, now_subseg);
++ if (!pool || !pool->symbol || pool->next_free_entry == 0)
++ return;
++
++ /* Make sure the constant pool is properly aligned */
++ frag_align_code(2, 0);
++ if (bfd_get_section_alignment(stdoutput, pool->section) < 2)
++ bfd_set_section_alignment(stdoutput, pool->section, 2);
++
++ /* Assume none of the entries are discarded, and that we need the
++ maximum amount of alignment. But we're not going to allocate
++ anything up front. */
++ max_size = pool->next_free_entry * 4 + 2;
++ frag_grow(max_size);
++ buf = frag_more(0);
++
++ frag_now->tc_frag_data.relaxer = &avr32_cpool_relaxer;
++ frag_now->tc_frag_data.pool = pool;
++
++ symbol_set_frag(pool->symbol, frag_now);
++
++ /* Assume zero initial size, allowing other relaxers to be
++ optimistic about things. */
++ frag_var(rs_machine_dependent, max_size, 0,
++ 0, pool->symbol, 0, NULL);
++
++ /* Mark the pool as empty. */
++ pool->used = 1;
++}
++
++/* The location from which a PC relative jump should be calculated,
++ given a PC relative reloc. */
++
++long
++md_pcrel_from_section (fixS *fixP, segT sec)
++{
++ pr_debug("pcrel_from_section, fx_offset = %d\n", fixP->fx_offset);
++
++ if (fixP->fx_addsy != NULL
++ && (! S_IS_DEFINED (fixP->fx_addsy)
++ || S_GET_SEGMENT (fixP->fx_addsy) != sec
++ || S_FORCE_RELOC(fixP->fx_addsy, 1)))
++ {
++ pr_debug("Unknown pcrel symbol: %s\n", S_GET_NAME(fixP->fx_addsy));
++
++ /* The symbol is undefined (or is defined but not in this section).
++ Let the linker figure it out. */
++ return 0;
++ }
++
++ pr_debug("pcrel from %x + %x, symbol: %s (%x)\n",
++ fixP->fx_frag->fr_address, fixP->fx_where,
++ fixP->fx_addsy?S_GET_NAME(fixP->fx_addsy):"(null)",
++ fixP->fx_addsy?S_GET_VALUE(fixP->fx_addsy):0);
++
++ return ((fixP->fx_frag->fr_address + fixP->fx_where)
++ & (~0UL << fixP->tc_fix_data.align));
++}
++
++valueT
++md_section_align (segT segment, valueT size)
++{
++ int align = bfd_get_section_alignment (stdoutput, segment);
++ return ((size + (1 << align) - 1) & (-1 << align));
++}
++
++static int syntax_matches(const struct avr32_syntax *syntax,
++ char *str)
++{
++ int i;
++
++ pr_debug("syntax %d matches `%s'?\n", syntax->id, str);
++
++ if (syntax->nr_operands < 0)
++ {
++ struct avr32_operand *op;
++ int optype;
++
++ for (i = 0; i < (-syntax->nr_operands - 1); i++)
++ {
++ char *p;
++ char c;
++
++ optype = syntax->operand[i];
++ assert(optype < AVR32_NR_OPERANDS);
++ op = &avr32_operand_table[optype];
++
++ for (p = str; *p; p++)
++ if (*p == ',')
++ break;
++
++ if (p == str)
++ return 0;
++
++ c = *p;
++ *p = 0;
++
++ if (!op->match(str))
++ {
++ *p = c;
++ return 0;
++ }
++
++ str = p;
++ *p = c;
++ if (c)
++ str++;
++ }
++
++ optype = syntax->operand[i];
++ assert(optype < AVR32_NR_OPERANDS);
++ op = &avr32_operand_table[optype];
++
++ if (!op->match(str))
++ return 0;
++ return 1;
++ }
++
++ for (i = 0; i < syntax->nr_operands; i++)
++ {
++ struct avr32_operand *op;
++ int optype = syntax->operand[i];
++ char *p;
++ char c;
++
++ assert(optype < AVR32_NR_OPERANDS);
++ op = &avr32_operand_table[optype];
++
++ for (p = str; *p; p++)
++ if (*p == ',')
++ break;
++
++ if (p == str)
++ return 0;
++
++ c = *p;
++ *p = 0;
++
++ if (!op->match(str))
++ {
++ *p = c;
++ return 0;
++ }
++
++ str = p;
++ *p = c;
++ if (c)
++ str++;
++ }
++
++ if (*str == '\0')
++ return 1;
++
++ if ((*str == 'e' || *str == 'E') && !str[1])
++ return 1;
++
++ return 0;
++}
++
++static int parse_operands(char *str)
++{
++ int i;
++
++ if (current_insn.syntax->nr_operands < 0)
++ {
++ int optype;
++ struct avr32_operand *op;
++
++ for (i = 0; i < (-current_insn.syntax->nr_operands - 1); i++)
++ {
++ char *p;
++ char c;
++
++ optype = current_insn.syntax->operand[i];
++ op = &avr32_operand_table[optype];
++
++ for (p = str; *p; p++)
++ if (*p == ',')
++ break;
++
++ assert(p != str);
++
++ c = *p, *p = 0;
++ op->parse(op, str, i);
++ *p = c;
++
++ str = p;
++ if (c) str++;
++ }
++
++ /* give the rest of the line to the last operand */
++ optype = current_insn.syntax->operand[i];
++ op = &avr32_operand_table[optype];
++ op->parse(op, str, i);
++ }
++ else
++ {
++ for (i = 0; i < current_insn.syntax->nr_operands; i++)
++ {
++ int optype = current_insn.syntax->operand[i];
++ struct avr32_operand *op = &avr32_operand_table[optype];
++ char *p;
++ char c;
++
++ skip_whitespace(str);
++
++ for (p = str; *p; p++)
++ if (*p == ',')
++ break;
++
++ assert(p != str);
++
++ c = *p, *p = 0;
++ op->parse(op, str, i);
++ *p = c;
++
++ str = p;
++ if (c) str++;
++ }
++
++ if (*str == 'E' || *str == 'e')
++ current_insn.force_extended = 1;
++ }
++
++ return 0;
++}
++
++static const char *
++finish_insn(const struct avr32_opcode *opc)
++{
++ expressionS *exp = &current_insn.immediate;
++ unsigned int i;
++ int will_relax = 0;
++ char *buf;
++
++ assert(current_insn.next_slot == opc->nr_fields);
++
++ pr_debug("%s:%d: finish_insn: trying opcode %d\n",
++ frag_now->fr_file, frag_now->fr_line, opc->id);
++
++ /* Go through the relaxation stage for all instructions that can
++ possibly take a symbolic immediate. The relax code will take
++ care of range checking and alignment. */
++ if (opc->var_field != -1)
++ {
++ int substate, largest_substate;
++ symbolS *sym;
++ offsetT off;
++
++ will_relax = 1;
++ substate = largest_substate = opc_initial_substate(opc);
++
++ while (relax_more(largest_substate) != AVR32_RS_NONE)
++ largest_substate = relax_more(largest_substate);
++
++ pr_debug("will relax. initial substate: %d (size %d), largest substate: %d (size %d)\n",
++ substate, avr32_rs_size(substate),
++ largest_substate, avr32_rs_size(largest_substate));
++
++ /* make sure we have enough room for the largest possible opcode */
++ frag_grow(avr32_rs_size(largest_substate));
++ buf = frag_more(opc->size);
++
++ dwarf2_emit_insn(opc->size);
++
++ frag_now->tc_frag_data.reloc_info = AVR32_OPINFO_NONE;
++ frag_now->tc_frag_data.pcrel = current_insn.pcrel;
++ frag_now->tc_frag_data.force_extended = current_insn.force_extended;
++ frag_now->tc_frag_data.relaxer = &avr32_default_relaxer;
++
++ if (exp->X_op == O_hi)
++ {
++ frag_now->tc_frag_data.reloc_info = AVR32_OPINFO_HI;
++ exp->X_op = exp->X_md;
++ }
++ else if (exp->X_op == O_lo)
++ {
++ frag_now->tc_frag_data.reloc_info = AVR32_OPINFO_LO;
++ exp->X_op = exp->X_md;
++ }
++ else if (exp->X_op == O_got)
++ {
++ frag_now->tc_frag_data.reloc_info = AVR32_OPINFO_GOT;
++ exp->X_op = O_symbol;
++ }
++
++#if 0
++ if ((opc->reloc_type == BFD_RELOC_AVR32_SUB5)
++ && exp->X_op == O_subtract)
++ {
++ symbolS *tmp;
++ tmp = exp->X_add_symbol;
++ exp->X_add_symbol = exp->X_op_symbol;
++ exp->X_op_symbol = tmp;
++ }
++#endif
++
++ frag_now->tc_frag_data.exp = current_insn.immediate;
++
++ sym = exp->X_add_symbol;
++ off = exp->X_add_number;
++ if (exp->X_op != O_symbol)
++ {
++ sym = make_expr_symbol(exp);
++ off = 0;
++ }
++
++ frag_var(rs_machine_dependent,
++ avr32_rs_size(largest_substate) - opc->size,
++ opc->size,
++ substate, sym, off, buf);
++ }
++ else
++ {
++ assert(avr32_rs_size(opc_initial_substate(opc)) == 0);
++
++ /* Make sure we always have room for another whole word, as the ifield
++ inserters can only write words. */
++ frag_grow(4);
++ buf = frag_more(opc->size);
++ dwarf2_emit_insn(opc->size);
++ }
++
++ assert(!(opc->value & ~opc->mask));
++
++ pr_debug("inserting opcode: 0x%lx\n", opc->value);
++ bfd_putb32(opc->value, buf);
++
++ for (i = 0; i < opc->nr_fields; i++)
++ {
++ const struct avr32_ifield *f = opc->fields[i];
++ const struct avr32_ifield_data *fd = &current_insn.field_value[i];
++
++ pr_debug("inserting field: 0x%lx & 0x%lx\n",
++ fd->value >> fd->align_order, f->mask);
++
++ f->insert(f, buf, fd->value >> fd->align_order);
++ }
++
++ assert(will_relax || !current_insn.immediate.X_add_symbol);
++ return NULL;
++}
++
++static const char *
++finish_alias(const struct avr32_alias *alias)
++{
++ const struct avr32_opcode *opc;
++ struct {
++ unsigned long value;
++ unsigned long align;
++ } mapped_operand[AVR32_MAX_OPERANDS];
++ unsigned int i;
++
++ opc = alias->opc;
++
++ /* Remap the operands from the alias to the real opcode */
++ for (i = 0; i < opc->nr_fields; i++)
++ {
++ if (alias->operand_map[i].is_opindex)
++ {
++ struct avr32_ifield_data *fd;
++ fd = &current_insn.field_value[alias->operand_map[i].value];
++ mapped_operand[i].value = fd->value;
++ mapped_operand[i].align = fd->align_order;
++ }
++ else
++ {
++ mapped_operand[i].value = alias->operand_map[i].value;
++ mapped_operand[i].align = 0;
++ }
++ }
++
++ for (i = 0; i < opc->nr_fields; i++)
++ {
++ current_insn.field_value[i].value = mapped_operand[i].value;
++ if (opc->id == AVR32_OPC_COP)
++ current_insn.field_value[i].align_order = 0;
++ else
++ current_insn.field_value[i].align_order
++ = mapped_operand[i].align;
++ }
++
++ current_insn.next_slot = opc->nr_fields;
++
++ return finish_insn(opc);
++}
++
++static const char *
++finish_lda(const struct avr32_syntax *syntax ATTRIBUTE_UNUSED)
++{
++ expressionS *exp = &current_insn.immediate;
++ relax_substateT initial_subtype;
++ symbolS *sym;
++ offsetT off;
++ int initial_size, max_size;
++ char *buf;
++
++ initial_size = LDA_INITIAL_SIZE;
++
++ if (avr32_pic)
++ {
++ initial_subtype = LDA_SUBTYPE_SUB;
++ if (linkrelax)
++ max_size = 8;
++ else
++ max_size = 4;
++ }
++ else
++ {
++ initial_subtype = LDA_SUBTYPE_MOV1;
++ max_size = 4;
++ }
++
++ frag_grow(max_size);
++ buf = frag_more(initial_size);
++ dwarf2_emit_insn(initial_size);
++
++ if (exp->X_op == O_symbol)
++ {
++ sym = exp->X_add_symbol;
++ off = exp->X_add_number;
++ }
++ else
++ {
++ sym = make_expr_symbol(exp);
++ off = 0;
++ }
++
++ frag_now->tc_frag_data.reloc_info = current_insn.field_value[0].value;
++ frag_now->tc_frag_data.relaxer = &avr32_lda_relaxer;
++
++ if (!avr32_pic)
++ {
++ /* The relaxer will bump the refcount if necessary */
++ frag_now->tc_frag_data.pool
++ = add_to_cpool(exp, &frag_now->tc_frag_data.pool_entry, 0);
++ }
++
++ frag_var(rs_machine_dependent, max_size - initial_size,
++ initial_size, initial_subtype, sym, off, buf);
++
++ return NULL;
++}
++
++static const char *
++finish_call(const struct avr32_syntax *syntax ATTRIBUTE_UNUSED)
++{
++ expressionS *exp = &current_insn.immediate;
++ symbolS *sym;
++ offsetT off;
++ int initial_size, max_size;
++ char *buf;
++
++ initial_size = CALL_INITIAL_SIZE;
++
++ if (avr32_pic)
++ {
++ if (linkrelax)
++ max_size = 10;
++ else
++ max_size = 4;
++ }
++ else
++ max_size = 4;
++
++ frag_grow(max_size);
++ buf = frag_more(initial_size);
++ dwarf2_emit_insn(initial_size);
++
++ frag_now->tc_frag_data.relaxer = &avr32_call_relaxer;
++
++ if (exp->X_op == O_symbol)
++ {
++ sym = exp->X_add_symbol;
++ off = exp->X_add_number;
++ }
++ else
++ {
++ sym = make_expr_symbol(exp);
++ off = 0;
++ }
++
++ if (!avr32_pic)
++ {
++ /* The relaxer will bump the refcount if necessary */
++ frag_now->tc_frag_data.pool
++ = add_to_cpool(exp, &frag_now->tc_frag_data.pool_entry, 0);
++ }
++
++ frag_var(rs_machine_dependent, max_size - initial_size,
++ initial_size, CALL_SUBTYPE_RCALL1, sym, off, buf);
++
++ return NULL;
++}
++
++void
++md_begin (void)
++{
++ unsigned long flags = 0;
++ int i;
++
++ avr32_mnemonic_htab = hash_new();
++
++ if (!avr32_mnemonic_htab)
++ as_fatal(_("virtual memory exhausted"));
++
++ for (i = 0; i < AVR32_NR_MNEMONICS; i++)
++ {
++ hash_insert(avr32_mnemonic_htab, avr32_mnemonic_table[i].name,
++ (void *)&avr32_mnemonic_table[i]);
++ }
++
++ if (linkrelax)
++ flags |= EF_AVR32_LINKRELAX;
++ if (avr32_pic)
++ flags |= EF_AVR32_PIC;
++
++ bfd_set_private_flags(stdoutput, flags);
++
++#ifdef OPC_CONSISTENCY_CHECK
++ if (sizeof(avr32_operand_table)/sizeof(avr32_operand_table[0])
++ < AVR32_NR_OPERANDS)
++ as_fatal(_("operand table is incomplete"));
++
++ for (i = 0; i < AVR32_NR_OPERANDS; i++)
++ if (avr32_operand_table[i].id != i)
++ as_fatal(_("operand table inconsistency found at index %d\n"), i);
++ pr_debug("%d operands verified\n", AVR32_NR_OPERANDS);
++
++ for (i = 0; i < AVR32_NR_IFIELDS; i++)
++ if (avr32_ifield_table[i].id != i)
++ as_fatal(_("ifield table inconsistency found at index %d\n"), i);
++ pr_debug("%d instruction fields verified\n", AVR32_NR_IFIELDS);
++
++ for (i = 0; i < AVR32_NR_OPCODES; i++)
++ {
++ if (avr32_opc_table[i].id != i)
++ as_fatal(_("opcode table inconsistency found at index %d\n"), i);
++ if ((avr32_opc_table[i].var_field == -1
++ && avr32_relax_table[i].length != 0)
++ || (avr32_opc_table[i].var_field != -1
++ && avr32_relax_table[i].length == 0))
++ as_fatal(_("relax table inconsistency found at index %d\n"), i);
++ }
++ pr_debug("%d opcodes verified\n", AVR32_NR_OPCODES);
++
++ for (i = 0; i < AVR32_NR_SYNTAX; i++)
++ if (avr32_syntax_table[i].id != i)
++ as_fatal(_("syntax table inconsistency found at index %d\n"), i);
++ pr_debug("%d syntax variants verified\n", AVR32_NR_SYNTAX);
++
++ for (i = 0; i < AVR32_NR_ALIAS; i++)
++ if (avr32_alias_table[i].id != i)
++ as_fatal(_("alias table inconsistency found at index %d\n"), i);
++ pr_debug("%d aliases verified\n", AVR32_NR_ALIAS);
++
++ for (i = 0; i < AVR32_NR_MNEMONICS; i++)
++ if (avr32_mnemonic_table[i].id != i)
++ as_fatal(_("mnemonic table inconsistency found at index %d\n"), i);
++ pr_debug("%d mnemonics verified\n", AVR32_NR_MNEMONICS);
++#endif
++}
++
++void
++md_assemble (char *str)
++{
++ struct avr32_mnemonic *mnemonic;
++ char *p, c;
++
++ memset(&current_insn, 0, sizeof(current_insn));
++ current_insn.immediate.X_op = O_constant;
++
++ skip_whitespace(str);
++ for (p = str; *p; p++)
++ if (*p == ' ')
++ break;
++ c = *p;
++ *p = 0;
++
++ mnemonic = hash_find(avr32_mnemonic_htab, str);
++ *p = c;
++ if (c) p++;
++
++ if (mnemonic)
++ {
++ const struct avr32_syntax *syntax;
++
++ for (syntax = mnemonic->syntax; syntax; syntax = syntax->next)
++ {
++ const char *errmsg = NULL;
++
++ if (syntax_matches(syntax, p))
++ {
++ if (!(syntax->isa_flags & avr32_arch->isa_flags))
++ {
++ as_bad(_("Selected architecture `%s' does not support `%s'"),
++ avr32_arch->name, str);
++ return;
++ }
++
++ current_insn.syntax = syntax;
++ parse_operands(p);
++
++ switch (syntax->type)
++ {
++ case AVR32_PARSER_NORMAL:
++ errmsg = finish_insn(syntax->u.opc);
++ break;
++ case AVR32_PARSER_ALIAS:
++ errmsg = finish_alias(syntax->u.alias);
++ break;
++ case AVR32_PARSER_LDA:
++ errmsg = finish_lda(syntax);
++ break;
++ case AVR32_PARSER_CALL:
++ errmsg = finish_call(syntax);
++ break;
++ default:
++ BAD_CASE(syntax->type);
++ break;
++ }
++
++ if (errmsg)
++ as_bad("%s in `%s'", errmsg, str);
++
++ return;
++ }
++ }
++
++ as_bad(_("unrecognized form of instruction: `%s'"), str);
++ }
++ else
++ as_bad(_("unrecognized instruction `%s'"), str);
++}
++
++void avr32_cleanup(void)
++{
++ struct cpool *pool;
++
++ /* Emit any constant pools that haven't been explicitly flushed with
++ a .cpool directive. */
++ for (pool = cpool_list; pool; pool = pool->next)
++ {
++ subseg_set(pool->section, pool->sub_section);
++ s_cpool(0);
++ }
++}
++
++/* Handle any PIC-related operands in data allocation pseudo-ops */
++void
++avr32_cons_fix_new (fragS *frag, int off, int size, expressionS *exp)
++{
++ bfd_reloc_code_real_type r_type = BFD_RELOC_UNUSED;
++ int pcrel = 0;
++
++ pr_debug("%s:%u: cons_fix_new, add_sym: %s, op_sym: %s, op: %d, add_num: %d\n",
++ frag->fr_file, frag->fr_line,
++ exp->X_add_symbol?S_GET_NAME(exp->X_add_symbol):"(none)",
++ exp->X_op_symbol?S_GET_NAME(exp->X_op_symbol):"(none)",
++ exp->X_op, exp->X_add_number);
++
++ if (exp->X_op == O_subtract && exp->X_op_symbol)
++ {
++ if (exp->X_op_symbol == GOT_symbol)
++ {
++ if (size != 4)
++ goto bad_size;
++ r_type = BFD_RELOC_AVR32_GOTPC;
++ exp->X_op = O_symbol;
++ exp->X_op_symbol = NULL;
++ }
++ }
++ else if (exp->X_op == O_got)
++ {
++ switch (size)
++ {
++ case 1:
++ r_type = BFD_RELOC_AVR32_GOT8;
++ break;
++ case 2:
++ r_type = BFD_RELOC_AVR32_GOT16;
++ break;
++ case 4:
++ r_type = BFD_RELOC_AVR32_GOT32;
++ break;
++ default:
++ goto bad_size;
++ }
++
++ exp->X_op = O_symbol;
++ }
++
++ if (r_type == BFD_RELOC_UNUSED)
++ switch (size)
++ {
++ case 1:
++ r_type = BFD_RELOC_8;
++ break;
++ case 2:
++ r_type = BFD_RELOC_16;
++ break;
++ case 4:
++ r_type = BFD_RELOC_32;
++ break;
++ default:
++ goto bad_size;
++ }
++ else if (size != 4)
++ {
++ bad_size:
++ as_bad(_("unsupported BFD relocation size %u"), size);
++ r_type = BFD_RELOC_UNUSED;
++ }
++
++ fix_new_exp (frag, off, size, exp, pcrel, r_type);
++}
++
++static void
++avr32_frob_section(bfd *abfd ATTRIBUTE_UNUSED, segT sec,
++ void *ignore ATTRIBUTE_UNUSED)
++{
++ segment_info_type *seginfo;
++ fixS *fix;
++
++ seginfo = seg_info(sec);
++ if (!seginfo)
++ return;
++
++ for (fix = seginfo->fix_root; fix; fix = fix->fx_next)
++ {
++ if (fix->fx_done)
++ continue;
++
++ if (fix->fx_r_type == BFD_RELOC_AVR32_SUB5
++ && fix->fx_addsy && fix->fx_subsy)
++ {
++ if (S_GET_SEGMENT(fix->fx_addsy) != S_GET_SEGMENT(fix->fx_subsy)
++ || linkrelax)
++ {
++ symbolS *tmp;
++#ifdef DEBUG
++ fprintf(stderr, "Swapping symbols in fixup:\n");
++ print_fixup(fix);
++#endif
++ tmp = fix->fx_addsy;
++ fix->fx_addsy = fix->fx_subsy;
++ fix->fx_subsy = tmp;
++ fix->fx_offset = -fix->fx_offset;
++ }
++ }
++ }
++}
++
++/* We need to look for SUB5 instructions with expressions that will be
++ made PC-relative and switch fx_addsy with fx_subsy. This has to be
++ done before adjustment or the wrong symbol might be adjusted.
++
++ This applies to fixups that are a result of expressions like -(sym
++ - .) and that will make it all the way to md_apply_fix3(). LDA
++ does the right thing in convert_frag, so we must not convert
++ those. */
++void
++avr32_frob_file(void)
++{
++ /* if (1 || !linkrelax)
++ return; */
++
++ bfd_map_over_sections(stdoutput, avr32_frob_section, NULL);
++}
++
++static bfd_boolean
++convert_to_diff_reloc(fixS *fixP)
++{
++ switch (fixP->fx_r_type)
++ {
++ case BFD_RELOC_32:
++ fixP->fx_r_type = BFD_RELOC_AVR32_DIFF32;
++ break;
++ case BFD_RELOC_16:
++ fixP->fx_r_type = BFD_RELOC_AVR32_DIFF16;
++ break;
++ case BFD_RELOC_8:
++ fixP->fx_r_type = BFD_RELOC_AVR32_DIFF8;
++ break;
++ default:
++ return FALSE;
++ }
++
++ return TRUE;
++}
++
++/* Simplify a fixup. If possible, the fixup is reduced to a single
++ constant which is written to the output file. Otherwise, a
++ relocation is generated so that the linker can take care of the
++ rest.
++
++ ELF relocations have certain constraints: They can only take a
++ single symbol and a single addend. This means that for difference
++ expressions, we _must_ get rid of the fx_subsy symbol somehow.
++
++ The difference between two labels in the same section can be
++ calculated directly unless 'linkrelax' is set, or a relocation is
++ forced. If so, we must emit a R_AVR32_DIFFxx relocation. If there
++ are addends involved at this point, we must be especially careful
++ as the relocation must point exactly to the symbol being
++ subtracted.
++
++ When subtracting a symbol defined in the same section as the fixup,
++ we might be able to convert it to a PC-relative expression, unless
++ linkrelax is set. If this is the case, there's no way we can make
++ sure that the difference between the fixup and fx_subsy stays
++ constant. So for now, we're just going to disallow that.
++ */
++void
++avr32_process_fixup(fixS *fixP, segT this_segment)
++{
++ segT add_symbol_segment = absolute_section;
++ segT sub_symbol_segment = absolute_section;
++ symbolS *fx_addsy, *fx_subsy;
++ offsetT value = 0, fx_offset;
++ bfd_boolean apply = FALSE;
++
++ assert(this_segment != absolute_section);
++
++ if (fixP->fx_r_type >= BFD_RELOC_UNUSED)
++ {
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("Bad relocation type %d\n"), fixP->fx_r_type);
++ return;
++ }
++
++ /* BFD_RELOC_AVR32_SUB5 fixups have been swapped by avr32_frob_section() */
++ fx_addsy = fixP->fx_addsy;
++ fx_subsy = fixP->fx_subsy;
++ fx_offset = fixP->fx_offset;
++
++ if (fx_addsy)
++ add_symbol_segment = S_GET_SEGMENT(fx_addsy);
++
++ if (fx_subsy)
++ {
++ resolve_symbol_value(fx_subsy);
++ sub_symbol_segment = S_GET_SEGMENT(fx_subsy);
++
++ if (sub_symbol_segment == this_segment
++ && (!linkrelax
++ || S_GET_VALUE(fx_subsy) == (fixP->fx_frag->fr_address
++ + fixP->fx_where)))
++ {
++ fixP->fx_pcrel = TRUE;
++ fx_offset += (fixP->fx_frag->fr_address + fixP->fx_where
++ - S_GET_VALUE(fx_subsy));
++ fx_subsy = NULL;
++ }
++ else if (sub_symbol_segment == absolute_section)
++ {
++ /* The symbol is really a constant. */
++ fx_offset -= S_GET_VALUE(fx_subsy);
++ fx_subsy = NULL;
++ }
++ else if (SEG_NORMAL(add_symbol_segment)
++ && sub_symbol_segment == add_symbol_segment
++ && (!linkrelax || convert_to_diff_reloc(fixP)))
++ {
++ /* Difference between two labels in the same section. */
++ if (linkrelax)
++ {
++ /* convert_to_diff() has ensured that the reloc type is
++ either DIFF32, DIFF16 or DIFF8. */
++ value = (S_GET_VALUE(fx_addsy) + fixP->fx_offset
++ - S_GET_VALUE(fx_subsy));
++
++ /* Try to convert it to a section symbol if possible */
++ if (!S_FORCE_RELOC(fx_addsy, 1)
++ && !(sub_symbol_segment->flags & SEC_THREAD_LOCAL))
++ {
++ fx_offset = S_GET_VALUE(fx_subsy);
++ fx_addsy = section_symbol(sub_symbol_segment);
++ }
++ else
++ {
++ fx_addsy = fx_subsy;
++ fx_offset = 0;
++ }
++
++ fx_subsy = NULL;
++ apply = TRUE;
++ }
++ else
++ {
++ fx_offset += S_GET_VALUE(fx_addsy);
++ fx_offset -= S_GET_VALUE(fx_subsy);
++ fx_addsy = NULL;
++ fx_subsy = NULL;
++ }
++ }
++ else
++ {
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("can't resolve `%s' {%s section} - `%s' {%s section}"),
++ fx_addsy ? S_GET_NAME (fx_addsy) : "0",
++ segment_name (add_symbol_segment),
++ S_GET_NAME (fx_subsy),
++ segment_name (sub_symbol_segment));
++ return;
++ }
++ }
++
++ if (fx_addsy && !TC_FORCE_RELOCATION(fixP))
++ {
++ if (add_symbol_segment == this_segment
++ && fixP->fx_pcrel)
++ {
++ value += S_GET_VALUE(fx_addsy);
++ value -= md_pcrel_from_section(fixP, this_segment);
++ fx_addsy = NULL;
++ fixP->fx_pcrel = FALSE;
++ }
++ else if (add_symbol_segment == absolute_section)
++ {
++ fx_offset += S_GET_VALUE(fixP->fx_addsy);
++ fx_addsy = NULL;
++ }
++ }
++
++ if (!fx_addsy)
++ fixP->fx_done = TRUE;
++
++ if (fixP->fx_pcrel)
++ {
++ if (fx_addsy != NULL
++ && S_IS_DEFINED(fx_addsy)
++ && S_GET_SEGMENT(fx_addsy) != this_segment)
++ value += md_pcrel_from_section(fixP, this_segment);
++
++ switch (fixP->fx_r_type)
++ {
++ case BFD_RELOC_32:
++ fixP->fx_r_type = BFD_RELOC_32_PCREL;
++ break;
++ case BFD_RELOC_16:
++ fixP->fx_r_type = BFD_RELOC_16_PCREL;
++ break;
++ case BFD_RELOC_8:
++ fixP->fx_r_type = BFD_RELOC_8_PCREL;
++ break;
++ case BFD_RELOC_AVR32_SUB5:
++ fixP->fx_r_type = BFD_RELOC_AVR32_16N_PCREL;
++ break;
++ case BFD_RELOC_AVR32_16S:
++ fixP->fx_r_type = BFD_RELOC_AVR32_16B_PCREL;
++ break;
++ case BFD_RELOC_AVR32_14UW:
++ fixP->fx_r_type = BFD_RELOC_AVR32_14UW_PCREL;
++ break;
++ case BFD_RELOC_AVR32_10UW:
++ fixP->fx_r_type = BFD_RELOC_AVR32_10UW_PCREL;
++ break;
++ default:
++ /* Should have been taken care of already */
++ break;
++ }
++ }
++
++ if (fixP->fx_done || apply)
++ {
++ const struct avr32_ifield *ifield;
++ char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
++
++ if (fixP->fx_done)
++ value += fx_offset;
++
++ /* For hosts with longs bigger than 32-bits make sure that the top
++ bits of a 32-bit negative value read in by the parser are set,
++ so that the correct comparisons are made. */
++ if (value & 0x80000000)
++ value |= (-1L << 31);
++
++ switch (fixP->fx_r_type)
++ {
++ case BFD_RELOC_32:
++ case BFD_RELOC_16:
++ case BFD_RELOC_8:
++ case BFD_RELOC_AVR32_DIFF32:
++ case BFD_RELOC_AVR32_DIFF16:
++ case BFD_RELOC_AVR32_DIFF8:
++ md_number_to_chars(buf, value, fixP->fx_size);
++ break;
++ case BFD_RELOC_HI16:
++ value >>= 16;
++ case BFD_RELOC_LO16:
++ value &= 0xffff;
++ md_number_to_chars(buf + 2, value, 2);
++ break;
++ case BFD_RELOC_AVR32_16N_PCREL:
++ value = -value;
++ /* fall through */
++ case BFD_RELOC_AVR32_22H_PCREL:
++ case BFD_RELOC_AVR32_18W_PCREL:
++ case BFD_RELOC_AVR32_16B_PCREL:
++ case BFD_RELOC_AVR32_11H_PCREL:
++ case BFD_RELOC_AVR32_9H_PCREL:
++ case BFD_RELOC_AVR32_9UW_PCREL:
++ case BFD_RELOC_AVR32_3U:
++ case BFD_RELOC_AVR32_4UH:
++ case BFD_RELOC_AVR32_6UW:
++ case BFD_RELOC_AVR32_6S:
++ case BFD_RELOC_AVR32_7UW:
++ case BFD_RELOC_AVR32_8S_EXT:
++ case BFD_RELOC_AVR32_8S:
++ case BFD_RELOC_AVR32_10UW:
++ case BFD_RELOC_AVR32_10SW:
++ case BFD_RELOC_AVR32_STHH_W:
++ case BFD_RELOC_AVR32_14UW:
++ case BFD_RELOC_AVR32_16S:
++ case BFD_RELOC_AVR32_16U:
++ case BFD_RELOC_AVR32_21S:
++ case BFD_RELOC_AVR32_SUB5:
++ case BFD_RELOC_AVR32_CPCALL:
++ case BFD_RELOC_AVR32_16_CP:
++ case BFD_RELOC_AVR32_9W_CP:
++ case BFD_RELOC_AVR32_15S:
++ ifield = fixP->tc_fix_data.ifield;
++ pr_debug("insert field: %ld <= %ld <= %ld (align %u)\n",
++ fixP->tc_fix_data.min, value, fixP->tc_fix_data.max,
++ fixP->tc_fix_data.align);
++ if (value < fixP->tc_fix_data.min || value > fixP->tc_fix_data.max)
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("operand out of range (%ld not between %ld and %ld)"),
++ value, fixP->tc_fix_data.min, fixP->tc_fix_data.max);
++ if (value & ((1 << fixP->tc_fix_data.align) - 1))
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("misaligned operand (required alignment: %d)"),
++ 1 << fixP->tc_fix_data.align);
++ ifield->insert(ifield, buf, value >> fixP->tc_fix_data.align);
++ break;
++ case BFD_RELOC_AVR32_ALIGN:
++ /* Nothing to do */
++ fixP->fx_done = FALSE;
++ break;
++ default:
++ as_fatal("reloc type %s not handled\n",
++ bfd_get_reloc_code_name(fixP->fx_r_type));
++ }
++ }
++
++ fixP->fx_addsy = fx_addsy;
++ fixP->fx_subsy = fx_subsy;
++ fixP->fx_offset = fx_offset;
++
++ if (!fixP->fx_done)
++ {
++ if (!fixP->fx_addsy)
++ fixP->fx_addsy = abs_section_sym;
++
++ symbol_mark_used_in_reloc(fixP->fx_addsy);
++ if (fixP->fx_subsy)
++ abort();
++ }
++}
++
++#if 0
++void
++md_apply_fix3 (fixS *fixP, valueT *valP, segT seg)
++{
++ const struct avr32_ifield *ifield;
++ offsetT value = *valP;
++ char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
++ bfd_boolean apply;
++
++ pr_debug("%s:%u: apply_fix3: r_type=%d value=%lx offset=%lx\n",
++ fixP->fx_file, fixP->fx_line, fixP->fx_r_type, *valP,
++ fixP->fx_offset);
++
++ if (fixP->fx_r_type >= BFD_RELOC_UNUSED)
++ {
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("Bad relocation type %d\n"), fixP->fx_r_type);
++ return;
++ }
++
++ if (!fixP->fx_addsy && !fixP->fx_subsy)
++ fixP->fx_done = 1;
++
++ if (fixP->fx_pcrel)
++ {
++ if (fixP->fx_addsy != NULL
++ && S_IS_DEFINED(fixP->fx_addsy)
++ && S_GET_SEGMENT(fixP->fx_addsy) != seg)
++ value += md_pcrel_from_section(fixP, seg);
++
++ switch (fixP->fx_r_type)
++ {
++ case BFD_RELOC_32:
++ fixP->fx_r_type = BFD_RELOC_32_PCREL;
++ break;
++ case BFD_RELOC_16:
++ case BFD_RELOC_8:
++ as_bad_where (fixP->fx_file, fixP->fx_line,
++ _("8- and 16-bit PC-relative relocations not supported"));
++ break;
++ case BFD_RELOC_AVR32_SUB5:
++ fixP->fx_r_type = BFD_RELOC_AVR32_PCREL_SUB5;
++ break;
++ case BFD_RELOC_AVR32_16S:
++ fixP->fx_r_type = BFD_RELOC_AVR32_16_PCREL;
++ break;
++ default:
++ /* Should have been taken care of already */
++ break;
++ }
++ }
++
++ if (fixP->fx_r_type == BFD_RELOC_32
++ && fixP->fx_subsy)
++ {
++ fixP->fx_r_type = BFD_RELOC_AVR32_DIFF32;
++
++ /* Offsets are only allowed if it's a result of adjusting a
++ local symbol into a section-relative offset.
++ tc_fix_adjustable() should prevent any adjustment if there
++ was an offset involved before. */
++ if (fixP->fx_offset && !symbol_section_p(fixP->fx_addsy))
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("cannot represent symbol difference with an offset"));
++
++ value = (S_GET_VALUE(fixP->fx_addsy) + fixP->fx_offset
++ - S_GET_VALUE(fixP->fx_subsy));
++
++ /* The difference before any relaxing takes place is written
++ out, and the DIFF32 reloc identifies the address of the first
++ symbol (i.e. the on that's subtracted.) */
++ *valP = value;
++ fixP->fx_offset -= value;
++ fixP->fx_subsy = NULL;
++
++ md_number_to_chars(buf, value, fixP->fx_size);
++ }
++
++ if (fixP->fx_done)
++ {
++ switch (fixP->fx_r_type)
++ {
++ case BFD_RELOC_8:
++ case BFD_RELOC_16:
++ case BFD_RELOC_32:
++ md_number_to_chars(buf, value, fixP->fx_size);
++ break;
++ case BFD_RELOC_HI16:
++ value >>= 16;
++ case BFD_RELOC_LO16:
++ value &= 0xffff;
++ *valP = value;
++ md_number_to_chars(buf + 2, value, 2);
++ break;
++ case BFD_RELOC_AVR32_PCREL_SUB5:
++ value = -value;
++ /* fall through */
++ case BFD_RELOC_AVR32_9_PCREL:
++ case BFD_RELOC_AVR32_11_PCREL:
++ case BFD_RELOC_AVR32_16_PCREL:
++ case BFD_RELOC_AVR32_18_PCREL:
++ case BFD_RELOC_AVR32_22_PCREL:
++ case BFD_RELOC_AVR32_3U:
++ case BFD_RELOC_AVR32_4UH:
++ case BFD_RELOC_AVR32_6UW:
++ case BFD_RELOC_AVR32_6S:
++ case BFD_RELOC_AVR32_7UW:
++ case BFD_RELOC_AVR32_8S:
++ case BFD_RELOC_AVR32_10UW:
++ case BFD_RELOC_AVR32_10SW:
++ case BFD_RELOC_AVR32_14UW:
++ case BFD_RELOC_AVR32_16S:
++ case BFD_RELOC_AVR32_16U:
++ case BFD_RELOC_AVR32_21S:
++ case BFD_RELOC_AVR32_BRC1:
++ case BFD_RELOC_AVR32_SUB5:
++ case BFD_RELOC_AVR32_CPCALL:
++ case BFD_RELOC_AVR32_16_CP:
++ case BFD_RELOC_AVR32_9_CP:
++ case BFD_RELOC_AVR32_15S:
++ ifield = fixP->tc_fix_data.ifield;
++ pr_debug("insert field: %ld <= %ld <= %ld (align %u)\n",
++ fixP->tc_fix_data.min, value, fixP->tc_fix_data.max,
++ fixP->tc_fix_data.align);
++ if (value < fixP->tc_fix_data.min || value > fixP->tc_fix_data.max)
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("operand out of range (%ld not between %ld and %ld)"),
++ value, fixP->tc_fix_data.min, fixP->tc_fix_data.max);
++ if (value & ((1 << fixP->tc_fix_data.align) - 1))
++ as_bad_where(fixP->fx_file, fixP->fx_line,
++ _("misaligned operand (required alignment: %d)"),
++ 1 << fixP->tc_fix_data.align);
++ ifield->insert(ifield, buf, value >> fixP->tc_fix_data.align);
++ break;
++ case BFD_RELOC_AVR32_ALIGN:
++ /* Nothing to do */
++ fixP->fx_done = FALSE;
++ break;
++ default:
++ as_fatal("reloc type %s not handled\n",
++ bfd_get_reloc_code_name(fixP->fx_r_type));
++ }
++ }
++}
++#endif
++
++arelent *
++tc_gen_reloc (asection *section ATTRIBUTE_UNUSED,
++ fixS *fixp)
++{
++ arelent *reloc;
++ bfd_reloc_code_real_type code;
++
++ reloc = xmalloc (sizeof (arelent));
++
++ reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
++ *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
++ reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
++ reloc->addend = fixp->fx_offset;
++ code = fixp->fx_r_type;
++
++ reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
++
++ if (reloc->howto == NULL)
++ {
++ as_bad_where (fixp->fx_file, fixp->fx_line,
++ _("cannot represent relocation %s in this object file format"),
++ bfd_get_reloc_code_name (code));
++ return NULL;
++ }
++
++ return reloc;
++}
++
++bfd_boolean
++avr32_force_reloc(fixS *fixP)
++{
++ if (linkrelax && fixP->fx_addsy
++ && !(S_GET_SEGMENT(fixP->fx_addsy)->flags & SEC_DEBUGGING)
++ && S_GET_SEGMENT(fixP->fx_addsy) != absolute_section)
++ {
++ pr_debug(stderr, "force reloc: addsy=%p, r_type=%d, sec=%s\n",
++ fixP->fx_addsy, fixP->fx_r_type, S_GET_SEGMENT(fixP->fx_addsy)->name);
++ return 1;
++ }
++
++ return generic_force_reloc(fixP);
++}
++
++bfd_boolean
++avr32_fix_adjustable(fixS *fixP)
++{
++ switch (fixP->fx_r_type)
++ {
++ /* GOT relocations can't have addends since BFD treats all
++ references to a given symbol the same. This means that we
++ must avoid section-relative references to local symbols when
++ dealing with these kinds of relocs */
++ case BFD_RELOC_AVR32_GOT32:
++ case BFD_RELOC_AVR32_GOT16:
++ case BFD_RELOC_AVR32_GOT8:
++ case BFD_RELOC_AVR32_GOT21S:
++ case BFD_RELOC_AVR32_GOT18SW:
++ case BFD_RELOC_AVR32_GOT16S:
++ case BFD_RELOC_AVR32_LDA_GOT:
++ case BFD_RELOC_AVR32_GOTCALL:
++ pr_debug("fix not adjustable\n");
++ return 0;
++
++ default:
++ break;
++ }
++
++ return 1;
++}
++
++/* When we want the linker to be able to relax the code, we need to
++ output a reloc for every .align directive requesting an alignment
++ to a four byte boundary or larger. If we don't do this, the linker
++ can't guarantee that the alignment is actually maintained in the
++ linker output.
++
++ TODO: Might as well insert proper NOPs while we're at it... */
++void
++avr32_handle_align(fragS *frag)
++{
++ if (linkrelax
++ && frag->fr_type == rs_align_code
++ && frag->fr_address + frag->fr_fix > 0
++ && frag->fr_offset > 0)
++ {
++ /* The alignment order (fr_offset) is stored in the addend. */
++ fix_new(frag, frag->fr_fix, 2, &abs_symbol, frag->fr_offset,
++ FALSE, BFD_RELOC_AVR32_ALIGN);
++ }
++}
++
++/* Relax_align. Advance location counter to next address that has 'alignment'
++ lowest order bits all 0s, return size of adjustment made. */
++relax_addressT
++avr32_relax_align(segT segment ATTRIBUTE_UNUSED,
++ fragS *fragP,
++ relax_addressT address)
++{
++ relax_addressT mask;
++ relax_addressT new_address;
++ int alignment;
++
++ alignment = fragP->fr_offset;
++ mask = ~((~0) << alignment);
++ new_address = (address + mask) & (~mask);
++
++ return new_address - address;
++}
++
++/* Turn a string in input_line_pointer into a floating point constant
++ of type type, and store the appropriate bytes in *litP. The number
++ of LITTLENUMS emitted is stored in *sizeP . An error message is
++ returned, or NULL on OK. */
++
++/* Equal to MAX_PRECISION in atof-ieee.c */
++#define MAX_LITTLENUMS 6
++
++char *
++md_atof (type, litP, sizeP)
++char type;
++char * litP;
++int * sizeP;
++{
++ int i;
++ int prec;
++ LITTLENUM_TYPE words [MAX_LITTLENUMS];
++ char * t;
++
++ switch (type)
++ {
++ case 'f':
++ case 'F':
++ case 's':
++ case 'S':
++ prec = 2;
++ break;
++
++ case 'd':
++ case 'D':
++ case 'r':
++ case 'R':
++ prec = 4;
++ break;
++
++ /* FIXME: Some targets allow other format chars for bigger sizes here. */
++
++ default:
++ * sizeP = 0;
++ return _("Bad call to md_atof()");
++ }
++
++ t = atof_ieee (input_line_pointer, type, words);
++ if (t)
++ input_line_pointer = t;
++ * sizeP = prec * sizeof (LITTLENUM_TYPE);
++
++ for (i = 0; i < prec; i++)
++ {
++ md_number_to_chars (litP, (valueT) words[i],
++ sizeof (LITTLENUM_TYPE));
++ litP += sizeof (LITTLENUM_TYPE);
++ }
++
++ return 0;
++}
++
++static char *avr32_end_of_match(char *cont, char *what)
++{
++ int len = strlen (what);
++
++ if (! is_part_of_name (cont[len])
++ && strncasecmp (cont, what, len) == 0)
++ return cont + len;
++
++ return NULL;
++}
++
++int
++avr32_parse_name (char const *name, expressionS *exp, char *nextchar)
++{
++ char *next = input_line_pointer;
++ char *next_end;
++
++ pr_debug("parse_name: %s, nextchar=%c (%02x)\n", name, *nextchar, *nextchar);
++
++ if (*nextchar == '(')
++ {
++ if (strcasecmp(name, "hi") == 0)
++ {
++ *next = *nextchar;
++
++ expression(exp);
++
++ if (exp->X_op == O_constant)
++ {
++ pr_debug(" -> constant hi(0x%08lx) -> 0x%04lx\n",
++ exp->X_add_number, exp->X_add_number >> 16);
++ exp->X_add_number = (exp->X_add_number >> 16) & 0xffff;
++ }
++ else
++ {
++ exp->X_md = exp->X_op;
++ exp->X_op = O_hi;
++ }
++
++ return 1;
++ }
++ else if (strcasecmp(name, "lo") == 0)
++ {
++ *next = *nextchar;
++
++ expression(exp);
++
++ if (exp->X_op == O_constant)
++ exp->X_add_number &= 0xffff;
++ else
++ {
++ exp->X_md = exp->X_op;
++ exp->X_op = O_lo;
++ }
++
++ return 1;
++ }
++ }
++ else if (*nextchar == '@')
++ {
++ exp->X_md = exp->X_op;
++
++ if ((next_end = avr32_end_of_match (next + 1, "got")))
++ exp->X_op = O_got;
++ else if ((next_end = avr32_end_of_match (next + 1, "tlsgd")))
++ exp->X_op = O_tlsgd;
++ /* Add more as needed */
++ else
++ {
++ char c;
++ input_line_pointer++;
++ c = get_symbol_end();
++ as_bad (_("unknown relocation override `%s'"), next + 1);
++ *input_line_pointer = c;
++ input_line_pointer = next;
++ return 0;
++ }
++
++ exp->X_op_symbol = NULL;
++ exp->X_add_symbol = symbol_find_or_make (name);
++ exp->X_add_number = 0;
++
++ *input_line_pointer = *nextchar;
++ input_line_pointer = next_end;
++ *nextchar = *input_line_pointer;
++ *input_line_pointer = '\0';
++ return 1;
++ }
++ else if (strcmp (name, "_GLOBAL_OFFSET_TABLE_") == 0)
++ {
++ if (!GOT_symbol)
++ GOT_symbol = symbol_find_or_make(name);
++
++ exp->X_add_symbol = GOT_symbol;
++ exp->X_op = O_symbol;
++ exp->X_add_number = 0;
++ return 1;
++ }
++
++ return 0;
++}
++
++static void
++s_rseg (int value ATTRIBUTE_UNUSED)
++{
++ /* Syntax: RSEG segment_name [:type] [NOROOT|ROOT] [(align)]
++ * Defaults:
++ * - type: undocumented ("typically CODE or DATA")
++ * - ROOT
++ * - align: 1 for code, 0 for others
++ *
++ * TODO: NOROOT is ignored. If gas supports discardable segments, it should
++ * be implemented.
++ */
++ char *name, *end;
++ int length, type, attr;
++ int align = 0;
++
++ SKIP_WHITESPACE();
++
++ end = input_line_pointer;
++ while (0 == strchr ("\n\t;:( ", *end))
++ end++;
++ if (end == input_line_pointer)
++ {
++ as_warn (_("missing name"));
++ ignore_rest_of_line();
++ return;
++ }
++
++ name = xmalloc (end - input_line_pointer + 1);
++ memcpy (name, input_line_pointer, end - input_line_pointer);
++ name[end - input_line_pointer] = '\0';
++ input_line_pointer = end;
++
++ SKIP_WHITESPACE();
++
++ type = SHT_NULL;
++ attr = 0;
++
++ if (*input_line_pointer == ':')
++ {
++ /* Skip the colon */
++ ++input_line_pointer;
++ SKIP_WHITESPACE();
++
++ /* Possible options at this point:
++ * - flag (ROOT or NOROOT)
++ * - a segment type
++ */
++ end = input_line_pointer;
++ while (0 == strchr ("\n\t;:( ", *end))
++ end++;
++ length = end - input_line_pointer;
++ if (((length == 4) && (0 == strncasecmp( input_line_pointer, "ROOT", 4))) ||
++ ((length == 6) && (0 == strncasecmp( input_line_pointer, "NOROOT", 6))))
++ {
++ /* Ignore ROOT/NOROOT */
++ input_line_pointer = end;
++ }
++ else
++ {
++ /* Must be a segment type */
++ switch (*input_line_pointer)
++ {
++ case 'C':
++ case 'c':
++ if ((length == 4) &&
++ (0 == strncasecmp (input_line_pointer, "CODE", 4)))
++ {
++ attr |= SHF_ALLOC | SHF_EXECINSTR;
++ type = SHT_PROGBITS;
++ align = 1;
++ break;
++ }
++ if ((length == 5) &&
++ (0 == strncasecmp (input_line_pointer, "CONST", 5)))
++ {
++ attr |= SHF_ALLOC;
++ type = SHT_PROGBITS;
++ break;
++ }
++ goto de_fault;
++
++ case 'D':
++ case 'd':
++ if ((length == 4) &&
++ (0 == strncasecmp (input_line_pointer, "DATA", 4)))
++ {
++ attr |= SHF_ALLOC | SHF_WRITE;
++ type = SHT_PROGBITS;
++ break;
++ }
++ goto de_fault;
++
++ /* TODO: Add FAR*, HUGE*, IDATA and NEAR* if necessary */
++
++ case 'U':
++ case 'u':
++ if ((length == 7) &&
++ (0 == strncasecmp (input_line_pointer, "UNTYPED", 7)))
++ break;
++ goto de_fault;
++
++ /* TODO: Add XDATA and ZPAGE if necessary */
++
++ de_fault:
++ default:
++ as_warn (_("unrecognized segment type"));
++ }
++
++ input_line_pointer = end;
++ SKIP_WHITESPACE();
++
++ if (*input_line_pointer == ':')
++ {
++ /* ROOT/NOROOT */
++ ++input_line_pointer;
++ SKIP_WHITESPACE();
++
++ end = input_line_pointer;
++ while (0 == strchr ("\n\t;:( ", *end))
++ end++;
++ length = end - input_line_pointer;
++ if (! ((length == 4) &&
++ (0 == strncasecmp( input_line_pointer, "ROOT", 4))) &&
++ ! ((length == 6) &&
++ (0 == strncasecmp( input_line_pointer, "NOROOT", 6))))
++ {
++ as_warn (_("unrecognized segment flag"));
++ }
++
++ input_line_pointer = end;
++ SKIP_WHITESPACE();
++ }
++ }
++ }
++
++ if (*input_line_pointer == '(')
++ {
++ align = get_absolute_expression ();
++ }
++
++ demand_empty_rest_of_line();
++
++ obj_elf_change_section (name, type, attr, 0, NULL, 0, 0);
++#ifdef AVR32_DEBUG
++ fprintf( stderr, "RSEG: Changed section to %s, type: 0x%x, attr: 0x%x\n",
++ name, type, attr );
++ fprintf( stderr, "RSEG: Aligning to 2**%d\n", align );
++#endif
++
++ if (align > 15)
++ {
++ align = 15;
++ as_warn (_("alignment too large: %u assumed"), align);
++ }
++
++ /* Hope not, that is */
++ assert (now_seg != absolute_section);
++
++ /* Only make a frag if we HAVE to... */
++ if (align != 0 && !need_pass_2)
++ {
++ if (subseg_text_p (now_seg))
++ frag_align_code (align, 0);
++ else
++ frag_align (align, 0, 0);
++ }
++
++ record_alignment (now_seg, align - OCTETS_PER_BYTE_POWER);
++}
++
++/* vim: syntax=c sw=2
++ */
+--- /dev/null
++++ b/gas/config/tc-avr32.h
+@@ -0,0 +1,325 @@
++/* Assembler definitions for AVR32.
++ Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of GAS, the GNU Assembler.
++
++ GAS is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2, or (at your option)
++ any later version.
++
++ GAS is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GAS; see the file COPYING. If not, write to the Free
++ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#if 0
++#define DEBUG
++#define DEBUG1
++#define DEBUG2
++#define DEBUG3
++#define DEBUG4
++#define DEBUG5
++#endif
++
++/* Are we trying to be compatible with the IAR assembler? (--iar) */
++extern int avr32_iarcompat;
++
++/* By convention, you should define this macro in the `.h' file. For
++ example, `tc-m68k.h' defines `TC_M68K'. You might have to use this
++ if it is necessary to add CPU specific code to the object format
++ file. */
++#define TC_AVR32
++
++/* This macro is the BFD target name to use when creating the output
++ file. This will normally depend upon the `OBJ_FMT' macro. */
++#define TARGET_FORMAT "elf32-avr32"
++
++/* This macro is the BFD architecture to pass to `bfd_set_arch_mach'. */
++#define TARGET_ARCH bfd_arch_avr32
++
++/* This macro is the BFD machine number to pass to
++ `bfd_set_arch_mach'. If it is not defined, GAS will use 0. */
++#define TARGET_MACH 0
++
++/* UNDOCUMENTED: Allow //-style comments */
++#define DOUBLESLASH_LINE_COMMENTS
++
++/* You should define this macro to be non-zero if the target is big
++ endian, and zero if the target is little endian. */
++#define TARGET_BYTES_BIG_ENDIAN 1
++
++/* FIXME: It seems that GAS only expects a one-byte opcode...
++ #define NOP_OPCODE 0xd703 */
++
++/* If you define this macro, GAS will warn about the use of
++ nonstandard escape sequences in a string. */
++#undef ONLY_STANDARD_ESCAPES
++
++#define DWARF2_FORMAT(SEC) dwarf2_format_32bit
++
++/* Instructions are either 2 or 4 bytes long */
++/* #define DWARF2_LINE_MIN_INSN_LENGTH 2 */
++
++/* GAS will call this function for any expression that can not be
++ recognized. When the function is called, `input_line_pointer'
++ will point to the start of the expression. */
++#define md_operand(x)
++
++#define md_parse_name(name, expr, mode, c) avr32_parse_name(name, expr, c)
++extern int avr32_parse_name(const char *, struct expressionS *, char *);
++
++/* You may define this macro to generate a fixup for a data
++ allocation pseudo-op. */
++#define TC_CONS_FIX_NEW(FRAG, OFF, LEN, EXP) \
++ avr32_cons_fix_new(FRAG, OFF, LEN, EXP)
++void avr32_cons_fix_new (fragS *, int, int, expressionS *);
++
++/* `extsym - .' expressions can be emitted using PC-relative relocs */
++#define DIFF_EXPR_OK
++
++/* This is used to construct expressions out of @gotoff, etc. The
++ relocation type is stored in X_md */
++#define O_got O_md1
++#define O_hi O_md2
++#define O_lo O_md3
++#define O_tlsgd O_md4
++
++/* You may define this macro to parse an expression used in a data
++ allocation pseudo-op such as `.word'. You can use this to
++ recognize relocation directives that may appear in such directives. */
++/* #define TC_PARSE_CONS_EXPRESSION(EXPR,N) avr_parse_cons_expression (EXPR,N)
++ void avr_parse_cons_expression (expressionS *exp, int nbytes); */
++
++/* This should just call either `number_to_chars_bigendian' or
++ `number_to_chars_littleendian', whichever is appropriate. On
++ targets like the MIPS which support options to change the
++ endianness, which function to call is a runtime decision. On
++ other targets, `md_number_to_chars' can be a simple macro. */
++#define md_number_to_chars number_to_chars_bigendian
++
++/* `md_short_jump_size'
++ `md_long_jump_size'
++ `md_create_short_jump'
++ `md_create_long_jump'
++ If `WORKING_DOT_WORD' is defined, GAS will not do broken word
++ processing (*note Broken words::.). Otherwise, you should set
++ `md_short_jump_size' to the size of a short jump (a jump that is
++ just long enough to jump around a long jmp) and
++ `md_long_jump_size' to the size of a long jump (a jump that can go
++ anywhere in the function), You should define
++ `md_create_short_jump' to create a short jump around a long jump,
++ and define `md_create_long_jump' to create a long jump. */
++#define WORKING_DOT_WORD
++
++/* If you define this macro, it means that `tc_gen_reloc' may return
++ multiple relocation entries for a single fixup. In this case, the
++ return value of `tc_gen_reloc' is a pointer to a null terminated
++ array. */
++#undef RELOC_EXPANSION_POSSIBLE
++
++/* If you define this macro, GAS will not require pseudo-ops to start with a .
++ character. */
++#define NO_PSEUDO_DOT (avr32_iarcompat)
++
++/* The IAR assembler uses $ as the location counter. Unfortunately, we
++ can't make this dependent on avr32_iarcompat... */
++#define DOLLAR_DOT
++
++/* Values passed to md_apply_fix3 don't include the symbol value. */
++#define MD_APPLY_SYM_VALUE(FIX) 0
++
++/* The number of bytes to put into a word in a listing. This affects
++ the way the bytes are clumped together in the listing. For
++ example, a value of 2 might print `1234 5678' where a value of 1
++ would print `12 34 56 78'. The default value is 4. */
++#define LISTING_WORD_SIZE 4
++
++/* extern const struct relax_type md_relax_table[];
++#define TC_GENERIC_RELAX_TABLE md_relax_table */
++
++/*
++ An `.lcomm' directive with no explicit alignment parameter will use
++ this macro to set P2VAR to the alignment that a request for SIZE
++ bytes will have. The alignment is expressed as a power of two. If
++ no alignment should take place, the macro definition should do
++ nothing. Some targets define a `.bss' directive that is also
++ affected by this macro. The default definition will set P2VAR to
++ the truncated power of two of sizes up to eight bytes.
++
++ We want doublewords to be word-aligned, so we're going to modify the
++ default definition a tiny bit.
++*/
++#define TC_IMPLICIT_LCOMM_ALIGNMENT(SIZE, P2VAR) \
++ do \
++ { \
++ if ((SIZE) >= 4) \
++ (P2VAR) = 2; \
++ else if ((SIZE) >= 2) \
++ (P2VAR) = 1; \
++ else \
++ (P2VAR) = 0; \
++ } \
++ while (0)
++
++/* When relaxing, we need to generate relocations for alignment
++ directives. */
++#define HANDLE_ALIGN(frag) avr32_handle_align(frag)
++extern void avr32_handle_align(fragS *);
++
++/* See internals doc for explanation. Oh wait...
++ Now, can you guess where "alignment" comes from? ;-) */
++#define MAX_MEM_FOR_RS_ALIGN_CODE ((1 << alignment) - 1)
++
++/* We need to stop gas from reducing certain expressions (e.g. GOT
++ references) */
++#define tc_fix_adjustable(fix) avr32_fix_adjustable(fix)
++extern bfd_boolean avr32_fix_adjustable(struct fix *);
++
++/* The linker needs to be passed a little more information when relaxing. */
++#define TC_FORCE_RELOCATION(fix) avr32_force_reloc(fix)
++extern bfd_boolean avr32_force_reloc(struct fix *);
++
++/* I'm tired of working around all the madness in fixup_segment().
++ This hook will do basically the same things as the generic code,
++ and then it will "goto" right past it. */
++#define TC_VALIDATE_FIX(FIX, SEG, SKIP) \
++ do \
++ { \
++ avr32_process_fixup(FIX, SEG); \
++ if (!(FIX)->fx_done) \
++ ++seg_reloc_count; \
++ goto SKIP; \
++ } \
++ while (0)
++extern void avr32_process_fixup(struct fix *fixP, segT this_segment);
++
++/* Positive values of TC_FX_SIZE_SLACK allow a target to define
++ fixups that far past the end of a frag. Having such fixups
++ is of course most most likely a bug in setting fx_size correctly.
++ A negative value disables the fixup check entirely, which is
++ appropriate for something like the Renesas / SuperH SH_COUNT
++ reloc. */
++/* This target is buggy, and sets fix size too large. */
++#define TC_FX_SIZE_SLACK(FIX) -1
++
++/* We don't want the gas core to make any assumptions about our way of
++ doing linkrelaxing. */
++#define TC_LINKRELAX_FIXUP(SEG) 0
++
++/* ... but we do want it to insert lots of padding. */
++#define LINKER_RELAXING_SHRINKS_ONLY
++
++/* Better do it ourselves, really... */
++#define TC_RELAX_ALIGN(SEG, FRAG, ADDR) avr32_relax_align(SEG, FRAG, ADDR)
++extern relax_addressT
++avr32_relax_align(segT segment, fragS *fragP, relax_addressT address);
++
++/* Use line number format that is amenable to linker relaxation. */
++#define DWARF2_USE_FIXED_ADVANCE_PC (linkrelax != 0)
++
++/* This is called by write_object_file() just before symbols are
++ attempted converted into section symbols. */
++#define tc_frob_file_before_adjust() avr32_frob_file()
++extern void avr32_frob_file(void);
++
++/* If you define this macro, GAS will call it at the end of each input
++ file. */
++#define md_cleanup() avr32_cleanup()
++extern void avr32_cleanup(void);
++
++/* There's an AVR32-specific hack in operand() which creates O_md
++ expressions when encountering HWRD or LWRD. We need to generate
++ proper relocs for them */
++/* #define md_cgen_record_fixup_exp avr32_cgen_record_fixup_exp */
++
++/* I needed to add an extra hook in gas_cgen_finish_insn() for
++ conversion of O_md* operands because md_cgen_record_fixup_exp()
++ isn't called for relaxable insns */
++/* #define md_cgen_convert_expr(exp, opinfo) avr32_cgen_convert_expr(exp, opinfo)
++ int avr32_cgen_convert_expr(expressionS *, int); */
++
++/* #define tc_gen_reloc gas_cgen_tc_gen_reloc */
++
++/* If you define this macro, it should return the position from which
++ the PC relative adjustment for a PC relative fixup should be
++ made. On many processors, the base of a PC relative instruction is
++ the next instruction, so this macro would return the length of an
++ instruction, plus the address of the PC relative fixup. The latter
++ can be calculated as fixp->fx_where + fixp->fx_frag->fr_address. */
++extern long md_pcrel_from_section (struct fix *, segT);
++#define MD_PCREL_FROM_SECTION(FIX, SEC) md_pcrel_from_section (FIX, SEC)
++
++#define LOCAL_LABEL(name) (name[0] == '.' && (name[1] == 'L'))
++#define LOCAL_LABELS_FB 1
++
++struct avr32_relaxer
++{
++ int (*estimate_size)(fragS *, segT);
++ long (*relax_frag)(segT, fragS *, long);
++ void (*convert_frag)(bfd *, segT, fragS *);
++};
++
++/* AVR32 has quite complex instruction coding, which means we need
++ * lots of information in order to do the right thing during relaxing
++ * (basically, we need to be able to reconstruct a whole new opcode if
++ * necessary) */
++#define TC_FRAG_TYPE struct avr32_frag_data
++
++struct cpool;
++
++struct avr32_frag_data
++{
++ /* TODO: Maybe add an expression object here so that we can use
++ fix_new_exp() in md_convert_frag? We may have to decide
++ pcrel-ness in md_estimate_size_before_relax() as well...or we
++ might do it when parsing. Doing it while parsing may fail
++ because the sub_symbol is undefined then... */
++ int pcrel;
++ int force_extended;
++ int reloc_info;
++ struct avr32_relaxer *relaxer;
++ expressionS exp;
++
++ /* Points to associated constant pool, for use by LDA and CALL in
++ non-pic mode, and when relaxing the .cpool directive */
++ struct cpool *pool;
++ unsigned int pool_entry;
++};
++
++/* We will have to initialize the fields explicitly when needed */
++#define TC_FRAG_INIT(fragP)
++
++#define md_estimate_size_before_relax(fragP, segT) \
++ ((fragP)->tc_frag_data.relaxer->estimate_size(fragP, segT))
++#define md_relax_frag(segment, fragP, stretch) \
++ ((fragP)->tc_frag_data.relaxer->relax_frag(segment, fragP, stretch))
++#define md_convert_frag(abfd, segment, fragP) \
++ ((fragP)->tc_frag_data.relaxer->convert_frag(abfd, segment, fragP))
++
++#define TC_FIX_TYPE struct avr32_fix_data
++
++struct avr32_fix_data
++{
++ const struct avr32_ifield *ifield;
++ unsigned int align;
++ long min;
++ long max;
++};
++
++#define TC_INIT_FIX_DATA(fixP) \
++ do \
++ { \
++ (fixP)->tc_fix_data.ifield = NULL; \
++ (fixP)->tc_fix_data.align = 0; \
++ (fixP)->tc_fix_data.min = 0; \
++ (fixP)->tc_fix_data.max = 0; \
++ } \
++ while (0)
+--- a/gas/configure.tgt
++++ b/gas/configure.tgt
+@@ -33,6 +33,7 @@ case ${cpu} in
+ am33_2.0) cpu_type=mn10300 endian=little ;;
+ arm*be|arm*b) cpu_type=arm endian=big ;;
+ arm*) cpu_type=arm endian=little ;;
++ avr32*) cpu_type=avr32 endian=big ;;
+ bfin*) cpu_type=bfin endian=little ;;
+ c4x*) cpu_type=tic4x ;;
+ cr16*) cpu_type=cr16 endian=little ;;
+@@ -136,6 +137,9 @@ case ${generic_target} in
+
+ cr16-*-elf*) fmt=elf ;;
+
++ avr32-*-linux*) fmt=elf em=linux bfd_gas=yes ;;
++ avr32*) fmt=elf bfd_gas=yes ;;
++
+ cris-*-linux-* | crisv32-*-linux-*)
+ fmt=multi em=linux ;;
+ cris-*-* | crisv32-*-*) fmt=multi ;;
+--- a/gas/doc/all.texi
++++ b/gas/doc/all.texi
+@@ -30,6 +30,7 @@
+ @set ARC
+ @set ARM
+ @set AVR
++@set AVR32
+ @set Blackfin
+ @set CR16
+ @set CRIS
+--- a/gas/doc/asconfig.texi
++++ b/gas/doc/asconfig.texi
+@@ -30,6 +30,7 @@
+ @set ARC
+ @set ARM
+ @set AVR
++@set AVR32
+ @set Blackfin
+ @set CR16
+ @set CRIS
+--- a/gas/doc/as.texinfo
++++ b/gas/doc/as.texinfo
+@@ -6731,6 +6731,9 @@ subject, see the hardware manufacturer's
+ @ifset AVR
+ * AVR-Dependent:: AVR Dependent Features
+ @end ifset
++@ifset AVR32
++* AVR32-Dependent:: AVR32 Dependent Features
++@end ifset
+ @ifset Blackfin
+ * Blackfin-Dependent:: Blackfin Dependent Features
+ @end ifset
+@@ -6866,6 +6869,10 @@ subject, see the hardware manufacturer's
+ @include c-avr.texi
+ @end ifset
+
++@ifset AVR32
++@include c-avr32.texi
++@end ifset
++
+ @ifset Blackfin
+ @include c-bfin.texi
+ @end ifset
+--- /dev/null
++++ b/gas/doc/c-avr32.texi
+@@ -0,0 +1,244 @@
++@c Copyright 2005, 2006, 2007, 2008, 2009
++@c Atmel Corporation
++@c This is part of the GAS manual.
++@c For copying conditions, see the file as.texinfo.
++
++@ifset GENERIC
++@page
++@node AVR32-Dependent
++@chapter AVR32 Dependent Features
++@end ifset
++
++@ifclear GENERIC
++@node Machine Dependencies
++@chapter AVR32 Dependent Features
++@end ifclear
++
++@cindex AVR32 support
++@menu
++* AVR32 Options:: Options
++* AVR32 Syntax:: Syntax
++* AVR32 Directives:: Directives
++* AVR32 Opcodes:: Opcodes
++@end menu
++
++@node AVR32 Options
++@section Options
++@cindex AVR32 options
++@cindex options for AVR32
++
++@table @code
++
++@cindex @code{--pic} command line option, AVR32
++@cindex PIC code generation for AVR32
++@item --pic
++This option specifies that the output of the assembler should be marked
++as position-independent code (PIC). It will also ensure that
++pseudo-instructions that deal with address calculation are output as
++PIC, and that all absolute address references in the code are marked as
++such.
++
++@cindex @code{--linkrelax} command line option, AVR32
++@item --linkrelax
++This option specifies that the output of the assembler should be marked
++as linker-relaxable. It will also ensure that all PC-relative operands
++that may change during linker relaxation get appropriate relocations.
++
++@end table
++
++
++@node AVR32 Syntax
++@section Syntax
++@menu
++* AVR32-Chars:: Special Characters
++* AVR32-Symrefs:: Symbol references
++@end menu
++
++@node AVR32-Chars
++@subsection Special Characters
++
++@cindex line comment character, AVR32
++@cindex AVR32 line comment character
++The presence of a @samp{//} on a line indicates the start of a comment
++that extends to the end of the current line. If a @samp{#} appears as
++the first character of a line, the whole line is treated as a comment.
++
++@cindex line separator, AVR32
++@cindex statement separator, AVR32
++@cindex AVR32 line separator
++The @samp{;} character can be used instead of a newline to separate
++statements.
++
++@node AVR32-Symrefs
++@subsection Symbol references
++
++The absolute value of a symbol can be obtained by simply naming the
++symbol. However, as AVR32 symbols have 32-bit values, most symbols have
++values that are outside the range of any instructions.
++
++Instructions that take a PC-relative offset, e.g. @code{lddpc} or
++@code{rcall}, can also reference a symbol by simply naming the symbol
++(no explicit calculations necessary). In this case, the assembler or
++linker subtracts the address of the instruction from the symbol's value
++and inserts the result into the instruction. Note that even though an
++overflow is less likely to happen for a relative reference than for an
++absolute reference, the assembler or linker will generate an error if
++the referenced symbol is too far away from the current location.
++
++Relative references can be used for data as well. For example:
++
++@smallexample
++ lddpc r0, 2f
++1: add r0, pc
++ ...
++ .align 2
++2: .int @var{some_symbol} - 1b
++@end smallexample
++
++Here, r0 will end up with the run-time address of @var{some_symbol} even
++if the program was loaded at a different address than it was linked
++(position-independent code).
++
++@subsubsection Symbol modifiers
++
++@table @code
++
++@item @code{hi(@var{symbol})}
++Evaluates to the value of the symbol shifted right 16 bits. This will
++work even if @var{symbol} is defined in a different module.
++
++@item @code{lo(@var{symbol})}
++Evaluates to the low 16 bits of the symbol's value. This will work even
++if @var{symbol} is defined in a different module.
++
++@item @code{@var{symbol}@@got}
++Create a GOT entry for @var{symbol} and return the offset of that entry
++relative to the GOT base.
++
++@end table
++
++
++@node AVR32 Directives
++@section Directives
++@cindex machine directives, AVR32
++@cindex AVR32 directives
++
++@table @code
++
++@cindex @code{.cpool} directive, AVR32
++@item .cpool
++This directive causes the current contents of the constant pool to be
++dumped into the current section at the current location (aligned to a
++word boundary). @code{GAS} maintains a separate constant pool for each
++section and each sub-section. The @code{.cpool} directive will only
++affect the constant pool of the current section and sub-section. At the
++end of assembly, all remaining, non-empty constant pools will
++automatically be dumped.
++
++@end table
++
++
++@node AVR32 Opcodes
++@section Opcodes
++@cindex AVR32 opcodes
++@cindex opcodes for AVR32
++
++@code{@value{AS}} implements all the standard AVR32 opcodes. It also
++implements several pseudo-opcodes, which are recommended to use wherever
++possible because they give the tool chain better freedom to generate
++optimal code.
++
++@table @code
++
++@cindex @code{LDA.W reg, symbol} pseudo op, AVR32
++@item LDA.W
++@smallexample
++ lda.w @var{reg}, @var{symbol}
++@end smallexample
++
++This instruction will load the address of @var{symbol} into
++@var{reg}. The instruction will evaluate to one of the following,
++depending on the relative distance to the symbol, the relative distance
++to the constant pool and whether the @code{--pic} option has been
++specified. If the @code{--pic} option has not been specified, the
++alternatives are as follows:
++@smallexample
++ /* @var{symbol} evaluates to a small enough value */
++ mov @var{reg}, @var{symbol}
++
++ /* (. - @var{symbol}) evaluates to a small enough value */
++ sub @var{reg}, pc, . - @var{symbol}
++
++ /* Constant pool is close enough */
++ lddpc @var{reg}, @var{cpent}
++ ...
++@var{cpent}:
++ .long @var{symbol}
++
++ /* Otherwise (not implemented yet, probably not necessary) */
++ mov @var{reg}, lo(@var{symbol})
++ orh @var{reg}, hi(@var{symbol})
++@end smallexample
++
++If the @code{--pic} option has been specified, the alternatives are as
++follows:
++@smallexample
++ /* (. - @var{symbol}) evaluates to a small enough value */
++ sub @var{reg}, pc, . - @var{symbol}
++
++ /* If @code{--linkrelax} not specified */
++ ld.w @var{reg}, r6[@var{symbol}@@got]
++
++ /* Otherwise */
++ mov @var{reg}, @var{symbol}@@got / 4
++ ld.w @var{reg}, r6[@var{reg} << 2]
++@end smallexample
++
++If @var{symbol} is not defined in the same file and section as the
++@code{LDA.W} instruction, the most pessimistic alternative of the
++above is selected. The linker may convert it back into the most
++optimal alternative when the final value of all symbols is known.
++
++@cindex @code{CALL symbol} pseudo op, AVR32
++@item CALL
++@smallexample
++ call @var{symbol}
++@end smallexample
++
++This instruction will insert code to call the subroutine identified by
++@var{symbol}. It will evaluate to one of the following, depending on
++the relative distance to the symbol as well as the @code{--linkrelax}
++and @code{--pic} command-line options.
++
++If @var{symbol} is defined in the same section and input file, and the
++distance is small enough, an @code{rcall} instruction is inserted:
++@smallexample
++ rcall @var{symbol}
++@end smallexample
++
++Otherwise, if the @code{--pic} option has not been specified:
++@smallexample
++ mcall @var{cpent}
++ ...
++@var{cpent}:
++ .long @var{symbol}
++@end smallexample
++
++Finally, if nothing else fits and the @code{--pic} option has been
++specified, the assembler will indirect the call through the Global
++Offset Table:
++@smallexample
++ /* If @code{--linkrelax} not specified */
++ mcall r6[@var{symbol}@@got]
++
++ /* If @code{--linkrelax} specified */
++ mov lr, @var{symbol}@@got / 4
++ ld.w lr, r6[lr << 2]
++ icall lr
++@end smallexample
++
++The linker, after determining the final value of @var{symbol}, may
++convert any of these into more optimal alternatives. This includes
++deleting any superfluous constant pool- and GOT-entries.
++
++@end table
+--- a/gas/doc/Makefile.am
++++ b/gas/doc/Makefile.am
+@@ -33,6 +33,7 @@ CPU_DOCS = \
+ c-arc.texi \
+ c-arm.texi \
+ c-avr.texi \
++ c-avr32.texi \
+ c-bfin.texi \
+ c-cr16.texi \
+ c-d10v.texi \
+--- a/gas/Makefile.am
++++ b/gas/Makefile.am
+@@ -43,6 +43,7 @@ CPU_TYPES = \
+ arc \
+ arm \
+ avr \
++ avr32 \
+ bfin \
+ cr16 \
+ cris \
+@@ -244,6 +245,7 @@ TARGET_CPU_CFILES = \
+ config/tc-arc.c \
+ config/tc-arm.c \
+ config/tc-avr.c \
++ config/tc-avr32.c \
+ config/tc-bfin.c \
+ config/tc-cr16.c \
+ config/tc-cris.c \
+@@ -307,6 +309,7 @@ TARGET_CPU_HFILES = \
+ config/tc-arc.h \
+ config/tc-arm.h \
+ config/tc-avr.h \
++ config/tc-avr32.h \
+ config/tc-bfin.h \
+ config/tc-cr16.h \
+ config/tc-cris.h \
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/aliases.d
+@@ -0,0 +1,19 @@
++#as:
++#objdump: -dr
++#name: aliases
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <ld_nodisp>:
++ 0: 19 80 [ \t]+ld\.ub r0,r12\[0x0\]
++ 2: f9 20 00 00[ \t]+ld\.sb r0,r12\[0\]
++ 6: 98 80 [ \t]+ld\.uh r0,r12\[0x0\]
++ 8: 98 00 [ \t]+ld\.sh r0,r12\[0x0\]
++ a: 78 00 [ \t]+ld\.w r0,r12\[0x0\]
++
++0000000c <st_nodisp>:
++ c: b8 80 [ \t]+st\.b r12\[0x0\],r0
++ e: b8 00 [ \t]+st\.h r12\[0x0\],r0
++ 10: 99 00 [ \t]+st\.w r12\[0x0\],r0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/aliases.s
+@@ -0,0 +1,14 @@
++ .text
++ .global ld_nodisp
++ld_nodisp:
++ ld.ub r0, r12
++ ld.sb r0, r12
++ ld.uh r0, r12
++ ld.sh r0, r12
++ ld.w r0, r12
++
++ .global st_nodisp
++st_nodisp:
++ st.b r12, r0
++ st.h r12, r0
++ st.w r12, r0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/allinsn.d
+@@ -0,0 +1,2987 @@
++#as:
++#objdump: -dr
++#name: allinsn
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++[0-9a-f]* <ld_d5>:
++ *[0-9a-f]*: fe 0f 02 3e ld\.d lr,pc\[pc<<0x3\]
++ *[0-9a-f]*: e0 00 02 00 ld\.d r0,r0\[r0\]
++ *[0-9a-f]*: ea 05 02 26 ld\.d r6,r5\[r5<<0x2\]
++ *[0-9a-f]*: e8 04 02 14 ld\.d r4,r4\[r4<<0x1\]
++ *[0-9a-f]*: fc 0e 02 1e ld\.d lr,lr\[lr<<0x1\]
++ *[0-9a-f]*: e6 0d 02 2a ld\.d r10,r3\[sp<<0x2\]
++ *[0-9a-f]*: f4 06 02 28 ld\.d r8,r10\[r6<<0x2\]
++ *[0-9a-f]*: ee 09 02 02 ld\.d r2,r7\[r9\]
++
++[0-9a-f]* <ld_w5>:
++ *[0-9a-f]*: fe 0f 03 0f ld\.w pc,pc\[pc\]
++ *[0-9a-f]*: f8 0c 03 3c ld\.w r12,r12\[r12<<0x3\]
++ *[0-9a-f]*: ea 05 03 25 ld\.w r5,r5\[r5<<0x2\]
++ *[0-9a-f]*: e8 04 03 14 ld\.w r4,r4\[r4<<0x1\]
++ *[0-9a-f]*: fc 0e 03 1e ld\.w lr,lr\[lr<<0x1\]
++ *[0-9a-f]*: f2 09 03 02 ld\.w r2,r9\[r9\]
++ *[0-9a-f]*: e4 06 03 0b ld\.w r11,r2\[r6\]
++ *[0-9a-f]*: e4 0d 03 30 ld\.w r0,r2\[sp<<0x3\]
++
++[0-9a-f]* <ld_sh5>:
++ *[0-9a-f]*: fe 0f 04 0f ld\.sh pc,pc\[pc\]
++ *[0-9a-f]*: f8 0c 04 3c ld\.sh r12,r12\[r12<<0x3\]
++ *[0-9a-f]*: ea 05 04 25 ld\.sh r5,r5\[r5<<0x2\]
++ *[0-9a-f]*: e8 04 04 14 ld\.sh r4,r4\[r4<<0x1\]
++ *[0-9a-f]*: fc 0e 04 1e ld\.sh lr,lr\[lr<<0x1\]
++ *[0-9a-f]*: e0 0f 04 2b ld\.sh r11,r0\[pc<<0x2\]
++ *[0-9a-f]*: fa 06 04 2a ld\.sh r10,sp\[r6<<0x2\]
++ *[0-9a-f]*: e4 02 04 0c ld\.sh r12,r2\[r2\]
++
++[0-9a-f]* <ld_uh5>:
++ *[0-9a-f]*: fe 0f 05 0f ld\.uh pc,pc\[pc\]
++ *[0-9a-f]*: f8 0c 05 3c ld\.uh r12,r12\[r12<<0x3\]
++ *[0-9a-f]*: ea 05 05 25 ld\.uh r5,r5\[r5<<0x2\]
++ *[0-9a-f]*: e8 04 05 14 ld\.uh r4,r4\[r4<<0x1\]
++ *[0-9a-f]*: fc 0e 05 1e ld\.uh lr,lr\[lr<<0x1\]
++ *[0-9a-f]*: fe 0e 05 38 ld\.uh r8,pc\[lr<<0x3\]
++ *[0-9a-f]*: e2 0f 05 16 ld\.uh r6,r1\[pc<<0x1\]
++ *[0-9a-f]*: fc 0d 05 16 ld\.uh r6,lr\[sp<<0x1\]
++
++[0-9a-f]* <ld_sb2>:
++ *[0-9a-f]*: fe 0f 06 0f ld\.sb pc,pc\[pc\]
++ *[0-9a-f]*: f8 0c 06 3c ld\.sb r12,r12\[r12<<0x3\]
++ *[0-9a-f]*: ea 05 06 25 ld\.sb r5,r5\[r5<<0x2\]
++ *[0-9a-f]*: e8 04 06 14 ld\.sb r4,r4\[r4<<0x1\]
++ *[0-9a-f]*: fc 0e 06 1e ld\.sb lr,lr\[lr<<0x1\]
++ *[0-9a-f]*: e2 0f 06 39 ld\.sb r9,r1\[pc<<0x3\]
++ *[0-9a-f]*: e6 0b 06 10 ld\.sb r0,r3\[r11<<0x1\]
++ *[0-9a-f]*: ea 05 06 1a ld\.sb r10,r5\[r5<<0x1\]
++
++[0-9a-f]* <ld_ub5>:
++ *[0-9a-f]*: fe 0f 07 0f ld\.ub pc,pc\[pc\]
++ *[0-9a-f]*: f8 0c 07 3c ld\.ub r12,r12\[r12<<0x3\]
++ *[0-9a-f]*: ea 05 07 25 ld\.ub r5,r5\[r5<<0x2\]
++ *[0-9a-f]*: e8 04 07 14 ld\.ub r4,r4\[r4<<0x1\]
++ *[0-9a-f]*: fc 0e 07 1e ld\.ub lr,lr\[lr<<0x1\]
++ *[0-9a-f]*: f8 07 07 36 ld\.ub r6,r12\[r7<<0x3\]
++ *[0-9a-f]*: ec 0c 07 02 ld\.ub r2,r6\[r12\]
++ *[0-9a-f]*: ee 0b 07 10 ld\.ub r0,r7\[r11<<0x1\]
++
++[0-9a-f]* <st_d5>:
++ *[0-9a-f]*: fe 0f 08 0e st\.d pc\[pc\],lr
++ *[0-9a-f]*: f8 0c 08 3c st\.d r12\[r12<<0x3\],r12
++ *[0-9a-f]*: ea 05 08 26 st\.d r5\[r5<<0x2\],r6
++ *[0-9a-f]*: e8 04 08 14 st\.d r4\[r4<<0x1\],r4
++ *[0-9a-f]*: fc 0e 08 1e st\.d lr\[lr<<0x1\],lr
++ *[0-9a-f]*: e2 09 08 14 st\.d r1\[r9<<0x1\],r4
++ *[0-9a-f]*: f4 02 08 14 st\.d r10\[r2<<0x1\],r4
++ *[0-9a-f]*: f8 06 08 0e st\.d r12\[r6\],lr
++
++[0-9a-f]* <st_w5>:
++ *[0-9a-f]*: fe 0f 09 0f st\.w pc\[pc\],pc
++ *[0-9a-f]*: f8 0c 09 3c st\.w r12\[r12<<0x3\],r12
++ *[0-9a-f]*: ea 05 09 25 st\.w r5\[r5<<0x2\],r5
++ *[0-9a-f]*: e8 04 09 14 st\.w r4\[r4<<0x1\],r4
++ *[0-9a-f]*: fc 0e 09 1e st\.w lr\[lr<<0x1\],lr
++ *[0-9a-f]*: e2 0a 09 03 st\.w r1\[r10\],r3
++ *[0-9a-f]*: e0 0a 09 19 st\.w r0\[r10<<0x1\],r9
++ *[0-9a-f]*: e8 05 09 3f st\.w r4\[r5<<0x3\],pc
++
++[0-9a-f]* <st_h5>:
++ *[0-9a-f]*: fe 0f 0a 0f st\.h pc\[pc\],pc
++ *[0-9a-f]*: f8 0c 0a 3c st\.h r12\[r12<<0x3\],r12
++ *[0-9a-f]*: ea 05 0a 25 st\.h r5\[r5<<0x2\],r5
++ *[0-9a-f]*: e8 04 0a 14 st\.h r4\[r4<<0x1\],r4
++ *[0-9a-f]*: fc 0e 0a 1e st\.h lr\[lr<<0x1\],lr
++ *[0-9a-f]*: e4 09 0a 0b st\.h r2\[r9\],r11
++ *[0-9a-f]*: ea 01 0a 2c st\.h r5\[r1<<0x2\],r12
++ *[0-9a-f]*: fe 08 0a 23 st\.h pc\[r8<<0x2\],r3
++
++[0-9a-f]* <st_b5>:
++ *[0-9a-f]*: fe 0f 0b 0f st\.b pc\[pc\],pc
++ *[0-9a-f]*: f8 0c 0b 3c st\.b r12\[r12<<0x3\],r12
++ *[0-9a-f]*: ea 05 0b 25 st\.b r5\[r5<<0x2\],r5
++ *[0-9a-f]*: e8 04 0b 14 st\.b r4\[r4<<0x1\],r4
++ *[0-9a-f]*: fc 0e 0b 1e st\.b lr\[lr<<0x1\],lr
++ *[0-9a-f]*: e2 08 0b 16 st\.b r1\[r8<<0x1\],r6
++ *[0-9a-f]*: fc 0e 0b 31 st\.b lr\[lr<<0x3\],r1
++ *[0-9a-f]*: ea 00 0b 2f st\.b r5\[r0<<0x2\],pc
++
++[0-9a-f]* <divs>:
++ *[0-9a-f]*: fe 0f 0c 0f divs pc,pc,pc
++ *[0-9a-f]*: f8 0c 0c 0c divs r12,r12,r12
++ *[0-9a-f]*: ea 05 0c 05 divs r5,r5,r5
++ *[0-9a-f]*: e8 04 0c 04 divs r4,r4,r4
++ *[0-9a-f]*: fc 0e 0c 0e divs lr,lr,lr
++ *[0-9a-f]*: fe 0f 0c 03 divs r3,pc,pc
++ *[0-9a-f]*: f8 02 0c 09 divs r9,r12,r2
++ *[0-9a-f]*: e8 01 0c 07 divs r7,r4,r1
++
++[0-9a-f]* <add1>:
++ *[0-9a-f]*: 1e 0f add pc,pc
++ *[0-9a-f]*: 18 0c add r12,r12
++ *[0-9a-f]*: 0a 05 add r5,r5
++ *[0-9a-f]*: 08 04 add r4,r4
++ *[0-9a-f]*: 1c 0e add lr,lr
++ *[0-9a-f]*: 12 0c add r12,r9
++ *[0-9a-f]*: 06 06 add r6,r3
++ *[0-9a-f]*: 18 0a add r10,r12
++
++[0-9a-f]* <sub1>:
++ *[0-9a-f]*: 1e 1f sub pc,pc
++ *[0-9a-f]*: 18 1c sub r12,r12
++ *[0-9a-f]*: 0a 15 sub r5,r5
++ *[0-9a-f]*: 08 14 sub r4,r4
++ *[0-9a-f]*: 1c 1e sub lr,lr
++ *[0-9a-f]*: 0c 1e sub lr,r6
++ *[0-9a-f]*: 1a 10 sub r0,sp
++ *[0-9a-f]*: 18 16 sub r6,r12
++
++[0-9a-f]* <rsub1>:
++ *[0-9a-f]*: 1e 2f rsub pc,pc
++ *[0-9a-f]*: 18 2c rsub r12,r12
++ *[0-9a-f]*: 0a 25 rsub r5,r5
++ *[0-9a-f]*: 08 24 rsub r4,r4
++ *[0-9a-f]*: 1c 2e rsub lr,lr
++ *[0-9a-f]*: 1a 2b rsub r11,sp
++ *[0-9a-f]*: 08 27 rsub r7,r4
++ *[0-9a-f]*: 02 29 rsub r9,r1
++
++[0-9a-f]* <cp1>:
++ *[0-9a-f]*: 1e 3f cp\.w pc,pc
++ *[0-9a-f]*: 18 3c cp\.w r12,r12
++ *[0-9a-f]*: 0a 35 cp\.w r5,r5
++ *[0-9a-f]*: 08 34 cp\.w r4,r4
++ *[0-9a-f]*: 1c 3e cp\.w lr,lr
++ *[0-9a-f]*: 04 36 cp\.w r6,r2
++ *[0-9a-f]*: 12 30 cp\.w r0,r9
++ *[0-9a-f]*: 1a 33 cp\.w r3,sp
++
++[0-9a-f]* <or1>:
++ *[0-9a-f]*: 1e 4f or pc,pc
++ *[0-9a-f]*: 18 4c or r12,r12
++ *[0-9a-f]*: 0a 45 or r5,r5
++ *[0-9a-f]*: 08 44 or r4,r4
++ *[0-9a-f]*: 1c 4e or lr,lr
++ *[0-9a-f]*: 12 44 or r4,r9
++ *[0-9a-f]*: 08 4b or r11,r4
++ *[0-9a-f]*: 00 44 or r4,r0
++
++[0-9a-f]* <eor1>:
++ *[0-9a-f]*: 1e 5f eor pc,pc
++ *[0-9a-f]*: 18 5c eor r12,r12
++ *[0-9a-f]*: 0a 55 eor r5,r5
++ *[0-9a-f]*: 08 54 eor r4,r4
++ *[0-9a-f]*: 1c 5e eor lr,lr
++ *[0-9a-f]*: 16 5c eor r12,r11
++ *[0-9a-f]*: 02 50 eor r0,r1
++ *[0-9a-f]*: 1e 55 eor r5,pc
++
++[0-9a-f]* <and1>:
++ *[0-9a-f]*: 1e 6f and pc,pc
++ *[0-9a-f]*: 18 6c and r12,r12
++ *[0-9a-f]*: 0a 65 and r5,r5
++ *[0-9a-f]*: 08 64 and r4,r4
++ *[0-9a-f]*: 1c 6e and lr,lr
++ *[0-9a-f]*: 02 68 and r8,r1
++ *[0-9a-f]*: 1a 60 and r0,sp
++ *[0-9a-f]*: 0a 6a and r10,r5
++
++[0-9a-f]* <tst>:
++ *[0-9a-f]*: 1e 7f tst pc,pc
++ *[0-9a-f]*: 18 7c tst r12,r12
++ *[0-9a-f]*: 0a 75 tst r5,r5
++ *[0-9a-f]*: 08 74 tst r4,r4
++ *[0-9a-f]*: 1c 7e tst lr,lr
++ *[0-9a-f]*: 18 70 tst r0,r12
++ *[0-9a-f]*: 0c 7a tst r10,r6
++ *[0-9a-f]*: 08 7d tst sp,r4
++
++[0-9a-f]* <andn>:
++ *[0-9a-f]*: 1e 8f andn pc,pc
++ *[0-9a-f]*: 18 8c andn r12,r12
++ *[0-9a-f]*: 0a 85 andn r5,r5
++ *[0-9a-f]*: 08 84 andn r4,r4
++ *[0-9a-f]*: 1c 8e andn lr,lr
++ *[0-9a-f]*: 18 89 andn r9,r12
++ *[0-9a-f]*: 1a 8b andn r11,sp
++ *[0-9a-f]*: 0a 8c andn r12,r5
++
++[0-9a-f]* <mov3>:
++ *[0-9a-f]*: 1e 9f mov pc,pc
++ *[0-9a-f]*: 18 9c mov r12,r12
++ *[0-9a-f]*: 0a 95 mov r5,r5
++ *[0-9a-f]*: 08 94 mov r4,r4
++ *[0-9a-f]*: 1c 9e mov lr,lr
++ *[0-9a-f]*: 12 95 mov r5,r9
++ *[0-9a-f]*: 16 9b mov r11,r11
++ *[0-9a-f]*: 1c 92 mov r2,lr
++
++[0-9a-f]* <st_w1>:
++ *[0-9a-f]*: 1e af st\.w pc\+\+,pc
++ *[0-9a-f]*: 18 ac st\.w r12\+\+,r12
++ *[0-9a-f]*: 0a a5 st\.w r5\+\+,r5
++ *[0-9a-f]*: 08 a4 st\.w r4\+\+,r4
++ *[0-9a-f]*: 1c ae st\.w lr\+\+,lr
++ *[0-9a-f]*: 02 ab st\.w r1\+\+,r11
++ *[0-9a-f]*: 1a a0 st\.w sp\+\+,r0
++ *[0-9a-f]*: 1a a1 st\.w sp\+\+,r1
++
++[0-9a-f]* <st_h1>:
++ *[0-9a-f]*: 1e bf st\.h pc\+\+,pc
++ *[0-9a-f]*: 18 bc st\.h r12\+\+,r12
++ *[0-9a-f]*: 0a b5 st\.h r5\+\+,r5
++ *[0-9a-f]*: 08 b4 st\.h r4\+\+,r4
++ *[0-9a-f]*: 1c be st\.h lr\+\+,lr
++ *[0-9a-f]*: 18 bd st\.h r12\+\+,sp
++ *[0-9a-f]*: 0e be st\.h r7\+\+,lr
++ *[0-9a-f]*: 0e b4 st\.h r7\+\+,r4
++
++[0-9a-f]* <st_b1>:
++ *[0-9a-f]*: 1e cf st\.b pc\+\+,pc
++ *[0-9a-f]*: 18 cc st\.b r12\+\+,r12
++ *[0-9a-f]*: 0a c5 st\.b r5\+\+,r5
++ *[0-9a-f]*: 08 c4 st\.b r4\+\+,r4
++ *[0-9a-f]*: 1c ce st\.b lr\+\+,lr
++ *[0-9a-f]*: 12 cd st\.b r9\+\+,sp
++ *[0-9a-f]*: 02 cd st\.b r1\+\+,sp
++ *[0-9a-f]*: 00 c4 st\.b r0\+\+,r4
++
++[0-9a-f]* <st_w2>:
++ *[0-9a-f]*: 1e df st\.w --pc,pc
++ *[0-9a-f]*: 18 dc st\.w --r12,r12
++ *[0-9a-f]*: 0a d5 st\.w --r5,r5
++ *[0-9a-f]*: 08 d4 st\.w --r4,r4
++ *[0-9a-f]*: 1c de st\.w --lr,lr
++ *[0-9a-f]*: 02 d7 st\.w --r1,r7
++ *[0-9a-f]*: 06 d9 st\.w --r3,r9
++ *[0-9a-f]*: 0a d5 st\.w --r5,r5
++
++[0-9a-f]* <st_h2>:
++ *[0-9a-f]*: 1e ef st\.h --pc,pc
++ *[0-9a-f]*: 18 ec st\.h --r12,r12
++ *[0-9a-f]*: 0a e5 st\.h --r5,r5
++ *[0-9a-f]*: 08 e4 st\.h --r4,r4
++ *[0-9a-f]*: 1c ee st\.h --lr,lr
++ *[0-9a-f]*: 0a e7 st\.h --r5,r7
++ *[0-9a-f]*: 10 e8 st\.h --r8,r8
++ *[0-9a-f]*: 0e e2 st\.h --r7,r2
++
++[0-9a-f]* <st_b2>:
++ *[0-9a-f]*: 1e ff st\.b --pc,pc
++ *[0-9a-f]*: 18 fc st\.b --r12,r12
++ *[0-9a-f]*: 0a f5 st\.b --r5,r5
++ *[0-9a-f]*: 08 f4 st\.b --r4,r4
++ *[0-9a-f]*: 1c fe st\.b --lr,lr
++ *[0-9a-f]*: 1a fd st\.b --sp,sp
++ *[0-9a-f]*: 1a fb st\.b --sp,r11
++ *[0-9a-f]*: 08 f5 st\.b --r4,r5
++
++[0-9a-f]* <ld_w1>:
++ *[0-9a-f]*: 1f 0f ld\.w pc,pc\+\+
++ *[0-9a-f]*: 19 0c ld\.w r12,r12\+\+
++ *[0-9a-f]*: 0b 05 ld\.w r5,r5\+\+
++ *[0-9a-f]*: 09 04 ld\.w r4,r4\+\+
++ *[0-9a-f]*: 1d 0e ld\.w lr,lr\+\+
++ *[0-9a-f]*: 0f 03 ld\.w r3,r7\+\+
++ *[0-9a-f]*: 1d 03 ld\.w r3,lr\+\+
++ *[0-9a-f]*: 0b 0c ld\.w r12,r5\+\+
++
++[0-9a-f]* <ld_sh1>:
++ *[0-9a-f]*: 1f 1f ld\.sh pc,pc\+\+
++ *[0-9a-f]*: 19 1c ld\.sh r12,r12\+\+
++ *[0-9a-f]*: 0b 15 ld\.sh r5,r5\+\+
++ *[0-9a-f]*: 09 14 ld\.sh r4,r4\+\+
++ *[0-9a-f]*: 1d 1e ld\.sh lr,lr\+\+
++ *[0-9a-f]*: 05 1b ld\.sh r11,r2\+\+
++ *[0-9a-f]*: 11 12 ld\.sh r2,r8\+\+
++ *[0-9a-f]*: 0d 17 ld\.sh r7,r6\+\+
++
++[0-9a-f]* <ld_uh1>:
++ *[0-9a-f]*: 1f 2f ld\.uh pc,pc\+\+
++ *[0-9a-f]*: 19 2c ld\.uh r12,r12\+\+
++ *[0-9a-f]*: 0b 25 ld\.uh r5,r5\+\+
++ *[0-9a-f]*: 09 24 ld\.uh r4,r4\+\+
++ *[0-9a-f]*: 1d 2e ld\.uh lr,lr\+\+
++ *[0-9a-f]*: 0f 26 ld\.uh r6,r7\+\+
++ *[0-9a-f]*: 17 2a ld\.uh r10,r11\+\+
++ *[0-9a-f]*: 09 2e ld\.uh lr,r4\+\+
++
++[0-9a-f]* <ld_ub1>:
++ *[0-9a-f]*: 1f 3f ld\.ub pc,pc\+\+
++ *[0-9a-f]*: 19 3c ld\.ub r12,r12\+\+
++ *[0-9a-f]*: 0b 35 ld\.ub r5,r5\+\+
++ *[0-9a-f]*: 09 34 ld\.ub r4,r4\+\+
++ *[0-9a-f]*: 1d 3e ld\.ub lr,lr\+\+
++ *[0-9a-f]*: 1d 38 ld\.ub r8,lr\+\+
++ *[0-9a-f]*: 19 3c ld\.ub r12,r12\+\+
++ *[0-9a-f]*: 15 3b ld\.ub r11,r10\+\+
++
++[0-9a-f]* <ld_w2>:
++ *[0-9a-f]*: 1f 4f ld\.w pc,--pc
++ *[0-9a-f]*: 19 4c ld\.w r12,--r12
++ *[0-9a-f]*: 0b 45 ld\.w r5,--r5
++ *[0-9a-f]*: 09 44 ld\.w r4,--r4
++ *[0-9a-f]*: 1d 4e ld\.w lr,--lr
++ *[0-9a-f]*: 1d 4a ld\.w r10,--lr
++ *[0-9a-f]*: 13 4c ld\.w r12,--r9
++ *[0-9a-f]*: 0b 46 ld\.w r6,--r5
++
++[0-9a-f]* <ld_sh2>:
++ *[0-9a-f]*: 1f 5f ld\.sh pc,--pc
++ *[0-9a-f]*: 19 5c ld\.sh r12,--r12
++ *[0-9a-f]*: 0b 55 ld\.sh r5,--r5
++ *[0-9a-f]*: 09 54 ld\.sh r4,--r4
++ *[0-9a-f]*: 1d 5e ld\.sh lr,--lr
++ *[0-9a-f]*: 15 5f ld\.sh pc,--r10
++ *[0-9a-f]*: 07 56 ld\.sh r6,--r3
++ *[0-9a-f]*: 0d 54 ld\.sh r4,--r6
++
++[0-9a-f]* <ld_uh2>:
++ *[0-9a-f]*: 1f 6f ld\.uh pc,--pc
++ *[0-9a-f]*: 19 6c ld\.uh r12,--r12
++ *[0-9a-f]*: 0b 65 ld\.uh r5,--r5
++ *[0-9a-f]*: 09 64 ld\.uh r4,--r4
++ *[0-9a-f]*: 1d 6e ld\.uh lr,--lr
++ *[0-9a-f]*: 05 63 ld\.uh r3,--r2
++ *[0-9a-f]*: 01 61 ld\.uh r1,--r0
++ *[0-9a-f]*: 13 62 ld\.uh r2,--r9
++
++[0-9a-f]* <ld_ub2>:
++ *[0-9a-f]*: 1f 7f ld\.ub pc,--pc
++ *[0-9a-f]*: 19 7c ld\.ub r12,--r12
++ *[0-9a-f]*: 0b 75 ld\.ub r5,--r5
++ *[0-9a-f]*: 09 74 ld\.ub r4,--r4
++ *[0-9a-f]*: 1d 7e ld\.ub lr,--lr
++ *[0-9a-f]*: 03 71 ld\.ub r1,--r1
++ *[0-9a-f]*: 0d 70 ld\.ub r0,--r6
++ *[0-9a-f]*: 0f 72 ld\.ub r2,--r7
++
++[0-9a-f]* <ld_ub3>:
++ *[0-9a-f]*: 1f 8f ld\.ub pc,pc\[0x0\]
++ *[0-9a-f]*: 19 fc ld\.ub r12,r12\[0x7\]
++ *[0-9a-f]*: 0b c5 ld\.ub r5,r5\[0x4\]
++ *[0-9a-f]*: 09 b4 ld\.ub r4,r4\[0x3\]
++ *[0-9a-f]*: 1d 9e ld\.ub lr,lr\[0x1\]
++ *[0-9a-f]*: 13 e6 ld\.ub r6,r9\[0x6\]
++ *[0-9a-f]*: 1d c2 ld\.ub r2,lr\[0x4\]
++ *[0-9a-f]*: 11 81 ld\.ub r1,r8\[0x0\]
++
++[0-9a-f]* <sub3_sp>:
++ *[0-9a-f]*: 20 0d sub sp,0
++ *[0-9a-f]*: 2f fd sub sp,-4
++ *[0-9a-f]*: 28 0d sub sp,-512
++ *[0-9a-f]*: 27 fd sub sp,508
++ *[0-9a-f]*: 20 1d sub sp,4
++ *[0-9a-f]*: 20 bd sub sp,44
++ *[0-9a-f]*: 20 2d sub sp,8
++ *[0-9a-f]*: 25 7d sub sp,348
++
++[0-9a-f]* <sub3>:
++ *[0-9a-f]*: 20 0f sub pc,0
++ *[0-9a-f]*: 2f fc sub r12,-1
++ *[0-9a-f]*: 28 05 sub r5,-128
++ *[0-9a-f]*: 27 f4 sub r4,127
++ *[0-9a-f]*: 20 1e sub lr,1
++ *[0-9a-f]*: 2d 76 sub r6,-41
++ *[0-9a-f]*: 22 54 sub r4,37
++ *[0-9a-f]*: 23 8c sub r12,56
++
++[0-9a-f]* <mov1>:
++ *[0-9a-f]*: 30 0f mov pc,0
++ *[0-9a-f]*: 3f fc mov r12,-1
++ *[0-9a-f]*: 38 05 mov r5,-128
++ *[0-9a-f]*: 37 f4 mov r4,127
++ *[0-9a-f]*: 30 1e mov lr,1
++ *[0-9a-f]*: 30 ef mov pc,14
++ *[0-9a-f]*: 39 c6 mov r6,-100
++ *[0-9a-f]*: 38 6e mov lr,-122
++
++[0-9a-f]* <lddsp>:
++ *[0-9a-f]*: 40 0f lddsp pc,sp\[0x0\]
++ *[0-9a-f]*: 47 fc lddsp r12,sp\[0x1fc\]
++ *[0-9a-f]*: 44 05 lddsp r5,sp\[0x100\]
++ *[0-9a-f]*: 43 f4 lddsp r4,sp\[0xfc\]
++ *[0-9a-f]*: 40 1e lddsp lr,sp\[0x4\]
++ *[0-9a-f]*: 44 0e lddsp lr,sp\[0x100\]
++ *[0-9a-f]*: 40 5c lddsp r12,sp\[0x14\]
++ *[0-9a-f]*: 47 69 lddsp r9,sp\[0x1d8\]
++
++[0-9a-f]* <lddpc>:
++ *[0-9a-f]*: 48 0f lddpc pc,[0-9a-f]* <.*>
++ *[0-9a-f]*: 4f f0 lddpc r0,[0-9a-f]* <.*>
++ *[0-9a-f]*: 4c 08 lddpc r8,[0-9a-f]* <.*>
++ *[0-9a-f]*: 4b f7 lddpc r7,[0-9a-f]* <.*>
++ *[0-9a-f]*: 48 1e lddpc lr,[0-9a-f]* <.*>
++ *[0-9a-f]*: 4f 6d lddpc sp,[0-9a-f]* <.*>
++ *[0-9a-f]*: 49 e6 lddpc r6,[0-9a-f]* <.*>
++ *[0-9a-f]*: 48 7b lddpc r11,[0-9a-f]* <.*>
++
++[0-9a-f]* <stdsp>:
++ *[0-9a-f]*: 50 0f stdsp sp\[0x0\],pc
++ *[0-9a-f]*: 57 fc stdsp sp\[0x1fc\],r12
++ *[0-9a-f]*: 54 05 stdsp sp\[0x100\],r5
++ *[0-9a-f]*: 53 f4 stdsp sp\[0xfc\],r4
++ *[0-9a-f]*: 50 1e stdsp sp\[0x4\],lr
++ *[0-9a-f]*: 54 cf stdsp sp\[0x130\],pc
++ *[0-9a-f]*: 54 00 stdsp sp\[0x100\],r0
++ *[0-9a-f]*: 55 45 stdsp sp\[0x150\],r5
++
++[0-9a-f]* <cp2>:
++ *[0-9a-f]*: 58 0f cp.w pc,0
++ *[0-9a-f]*: 5b fc cp.w r12,-1
++ *[0-9a-f]*: 5a 05 cp.w r5,-32
++ *[0-9a-f]*: 59 f4 cp.w r4,31
++ *[0-9a-f]*: 58 1e cp.w lr,1
++ *[0-9a-f]*: 58 38 cp.w r8,3
++ *[0-9a-f]*: 59 0e cp.w lr,16
++ *[0-9a-f]*: 5a 67 cp.w r7,-26
++
++[0-9a-f]* <acr>:
++ *[0-9a-f]*: 5c 0f acr pc
++ *[0-9a-f]*: 5c 0c acr r12
++ *[0-9a-f]*: 5c 05 acr r5
++ *[0-9a-f]*: 5c 04 acr r4
++ *[0-9a-f]*: 5c 0e acr lr
++ *[0-9a-f]*: 5c 02 acr r2
++ *[0-9a-f]*: 5c 0c acr r12
++ *[0-9a-f]*: 5c 0f acr pc
++
++[0-9a-f]* <scr>:
++ *[0-9a-f]*: 5c 1f scr pc
++ *[0-9a-f]*: 5c 1c scr r12
++ *[0-9a-f]*: 5c 15 scr r5
++ *[0-9a-f]*: 5c 14 scr r4
++ *[0-9a-f]*: 5c 1e scr lr
++ *[0-9a-f]*: 5c 1f scr pc
++ *[0-9a-f]*: 5c 16 scr r6
++ *[0-9a-f]*: 5c 11 scr r1
++
++[0-9a-f]* <cpc0>:
++ *[0-9a-f]*: 5c 2f cpc pc
++ *[0-9a-f]*: 5c 2c cpc r12
++ *[0-9a-f]*: 5c 25 cpc r5
++ *[0-9a-f]*: 5c 24 cpc r4
++ *[0-9a-f]*: 5c 2e cpc lr
++ *[0-9a-f]*: 5c 2f cpc pc
++ *[0-9a-f]*: 5c 24 cpc r4
++ *[0-9a-f]*: 5c 29 cpc r9
++
++[0-9a-f]* <neg>:
++ *[0-9a-f]*: 5c 3f neg pc
++ *[0-9a-f]*: 5c 3c neg r12
++ *[0-9a-f]*: 5c 35 neg r5
++ *[0-9a-f]*: 5c 34 neg r4
++ *[0-9a-f]*: 5c 3e neg lr
++ *[0-9a-f]*: 5c 37 neg r7
++ *[0-9a-f]*: 5c 31 neg r1
++ *[0-9a-f]*: 5c 39 neg r9
++
++[0-9a-f]* <abs>:
++ *[0-9a-f]*: 5c 4f abs pc
++ *[0-9a-f]*: 5c 4c abs r12
++ *[0-9a-f]*: 5c 45 abs r5
++ *[0-9a-f]*: 5c 44 abs r4
++ *[0-9a-f]*: 5c 4e abs lr
++ *[0-9a-f]*: 5c 46 abs r6
++ *[0-9a-f]*: 5c 46 abs r6
++ *[0-9a-f]*: 5c 44 abs r4
++
++[0-9a-f]* <castu_b>:
++ *[0-9a-f]*: 5c 5f castu\.b pc
++ *[0-9a-f]*: 5c 5c castu\.b r12
++ *[0-9a-f]*: 5c 55 castu\.b r5
++ *[0-9a-f]*: 5c 54 castu\.b r4
++ *[0-9a-f]*: 5c 5e castu\.b lr
++ *[0-9a-f]*: 5c 57 castu\.b r7
++ *[0-9a-f]*: 5c 5d castu\.b sp
++ *[0-9a-f]*: 5c 59 castu\.b r9
++
++[0-9a-f]* <casts_b>:
++ *[0-9a-f]*: 5c 6f casts\.b pc
++ *[0-9a-f]*: 5c 6c casts\.b r12
++ *[0-9a-f]*: 5c 65 casts\.b r5
++ *[0-9a-f]*: 5c 64 casts\.b r4
++ *[0-9a-f]*: 5c 6e casts\.b lr
++ *[0-9a-f]*: 5c 6b casts\.b r11
++ *[0-9a-f]*: 5c 61 casts\.b r1
++ *[0-9a-f]*: 5c 6a casts\.b r10
++
++[0-9a-f]* <castu_h>:
++ *[0-9a-f]*: 5c 7f castu\.h pc
++ *[0-9a-f]*: 5c 7c castu\.h r12
++ *[0-9a-f]*: 5c 75 castu\.h r5
++ *[0-9a-f]*: 5c 74 castu\.h r4
++ *[0-9a-f]*: 5c 7e castu\.h lr
++ *[0-9a-f]*: 5c 7a castu\.h r10
++ *[0-9a-f]*: 5c 7b castu\.h r11
++ *[0-9a-f]*: 5c 71 castu\.h r1
++
++[0-9a-f]* <casts_h>:
++ *[0-9a-f]*: 5c 8f casts\.h pc
++ *[0-9a-f]*: 5c 8c casts\.h r12
++ *[0-9a-f]*: 5c 85 casts\.h r5
++ *[0-9a-f]*: 5c 84 casts\.h r4
++ *[0-9a-f]*: 5c 8e casts\.h lr
++ *[0-9a-f]*: 5c 80 casts\.h r0
++ *[0-9a-f]*: 5c 85 casts\.h r5
++ *[0-9a-f]*: 5c 89 casts\.h r9
++
++[0-9a-f]* <brev>:
++ *[0-9a-f]*: 5c 9f brev pc
++ *[0-9a-f]*: 5c 9c brev r12
++ *[0-9a-f]*: 5c 95 brev r5
++ *[0-9a-f]*: 5c 94 brev r4
++ *[0-9a-f]*: 5c 9e brev lr
++ *[0-9a-f]*: 5c 95 brev r5
++ *[0-9a-f]*: 5c 9a brev r10
++ *[0-9a-f]*: 5c 98 brev r8
++
++[0-9a-f]* <swap_h>:
++ *[0-9a-f]*: 5c af swap\.h pc
++ *[0-9a-f]*: 5c ac swap\.h r12
++ *[0-9a-f]*: 5c a5 swap\.h r5
++ *[0-9a-f]*: 5c a4 swap\.h r4
++ *[0-9a-f]*: 5c ae swap\.h lr
++ *[0-9a-f]*: 5c a7 swap\.h r7
++ *[0-9a-f]*: 5c a0 swap\.h r0
++ *[0-9a-f]*: 5c a8 swap\.h r8
++
++[0-9a-f]* <swap_b>:
++ *[0-9a-f]*: 5c bf swap\.b pc
++ *[0-9a-f]*: 5c bc swap\.b r12
++ *[0-9a-f]*: 5c b5 swap\.b r5
++ *[0-9a-f]*: 5c b4 swap\.b r4
++ *[0-9a-f]*: 5c be swap\.b lr
++ *[0-9a-f]*: 5c ba swap\.b r10
++ *[0-9a-f]*: 5c bc swap\.b r12
++ *[0-9a-f]*: 5c b1 swap\.b r1
++
++[0-9a-f]* <swap_bh>:
++ *[0-9a-f]*: 5c cf swap\.bh pc
++ *[0-9a-f]*: 5c cc swap\.bh r12
++ *[0-9a-f]*: 5c c5 swap\.bh r5
++ *[0-9a-f]*: 5c c4 swap\.bh r4
++ *[0-9a-f]*: 5c ce swap\.bh lr
++ *[0-9a-f]*: 5c c9 swap\.bh r9
++ *[0-9a-f]*: 5c c4 swap\.bh r4
++ *[0-9a-f]*: 5c c1 swap\.bh r1
++
++[0-9a-f]* <One_s_compliment>:
++ *[0-9a-f]*: 5c df com pc
++ *[0-9a-f]*: 5c dc com r12
++ *[0-9a-f]*: 5c d5 com r5
++ *[0-9a-f]*: 5c d4 com r4
++ *[0-9a-f]*: 5c de com lr
++ *[0-9a-f]*: 5c d2 com r2
++ *[0-9a-f]*: 5c d2 com r2
++ *[0-9a-f]*: 5c d7 com r7
++
++[0-9a-f]* <tnbz>:
++ *[0-9a-f]*: 5c ef tnbz pc
++ *[0-9a-f]*: 5c ec tnbz r12
++ *[0-9a-f]*: 5c e5 tnbz r5
++ *[0-9a-f]*: 5c e4 tnbz r4
++ *[0-9a-f]*: 5c ee tnbz lr
++ *[0-9a-f]*: 5c e8 tnbz r8
++ *[0-9a-f]*: 5c ec tnbz r12
++ *[0-9a-f]*: 5c ef tnbz pc
++
++[0-9a-f]* <rol>:
++ *[0-9a-f]*: 5c ff rol pc
++ *[0-9a-f]*: 5c fc rol r12
++ *[0-9a-f]*: 5c f5 rol r5
++ *[0-9a-f]*: 5c f4 rol r4
++ *[0-9a-f]*: 5c fe rol lr
++ *[0-9a-f]*: 5c fa rol r10
++ *[0-9a-f]*: 5c f9 rol r9
++ *[0-9a-f]*: 5c f5 rol r5
++
++[0-9a-f]* <ror>:
++ *[0-9a-f]*: 5d 0f ror pc
++ *[0-9a-f]*: 5d 0c ror r12
++ *[0-9a-f]*: 5d 05 ror r5
++ *[0-9a-f]*: 5d 04 ror r4
++ *[0-9a-f]*: 5d 0e ror lr
++ *[0-9a-f]*: 5d 08 ror r8
++ *[0-9a-f]*: 5d 04 ror r4
++ *[0-9a-f]*: 5d 07 ror r7
++
++[0-9a-f]* <icall>:
++ *[0-9a-f]*: 5d 1f icall pc
++ *[0-9a-f]*: 5d 1c icall r12
++ *[0-9a-f]*: 5d 15 icall r5
++ *[0-9a-f]*: 5d 14 icall r4
++ *[0-9a-f]*: 5d 1e icall lr
++ *[0-9a-f]*: 5d 13 icall r3
++ *[0-9a-f]*: 5d 11 icall r1
++ *[0-9a-f]*: 5d 13 icall r3
++
++[0-9a-f]* <mustr>:
++ *[0-9a-f]*: 5d 2f mustr pc
++ *[0-9a-f]*: 5d 2c mustr r12
++ *[0-9a-f]*: 5d 25 mustr r5
++ *[0-9a-f]*: 5d 24 mustr r4
++ *[0-9a-f]*: 5d 2e mustr lr
++ *[0-9a-f]*: 5d 21 mustr r1
++ *[0-9a-f]*: 5d 24 mustr r4
++ *[0-9a-f]*: 5d 2c mustr r12
++
++[0-9a-f]* <musfr>:
++ *[0-9a-f]*: 5d 3f musfr pc
++ *[0-9a-f]*: 5d 3c musfr r12
++ *[0-9a-f]*: 5d 35 musfr r5
++ *[0-9a-f]*: 5d 34 musfr r4
++ *[0-9a-f]*: 5d 3e musfr lr
++ *[0-9a-f]*: 5d 3b musfr r11
++ *[0-9a-f]*: 5d 3c musfr r12
++ *[0-9a-f]*: 5d 32 musfr r2
++
++[0-9a-f]* <ret_cond>:
++ *[0-9a-f]*: 5e 0f reteq 1
++ *[0-9a-f]*: 5e fc retal r12
++ *[0-9a-f]*: 5e 85 retls r5
++ *[0-9a-f]*: 5e 74 retpl r4
++ *[0-9a-f]*: 5e 1e retne -1
++ *[0-9a-f]*: 5e 90 retgt r0
++ *[0-9a-f]*: 5e 9c retgt r12
++ *[0-9a-f]*: 5e 4a retge r10
++
++[0-9a-f]* <sr_cond>:
++ *[0-9a-f]*: 5f 0f sreq pc
++ *[0-9a-f]*: 5f fc sral r12
++ *[0-9a-f]*: 5f 85 srls r5
++ *[0-9a-f]*: 5f 74 srpl r4
++ *[0-9a-f]*: 5f 1e srne lr
++ *[0-9a-f]*: 5f 50 srlt r0
++ *[0-9a-f]*: 5f fd sral sp
++ *[0-9a-f]*: 5f 49 srge r9
++
++[0-9a-f]* <ld_w3>:
++ *[0-9a-f]*: 7e 0f ld\.w pc,pc\[0x0\]
++ *[0-9a-f]*: 79 fc ld\.w r12,r12\[0x7c\]
++ *[0-9a-f]*: 6b 05 ld\.w r5,r5\[0x40\]
++ *[0-9a-f]*: 68 f4 ld\.w r4,r4\[0x3c\]
++ *[0-9a-f]*: 7c 1e ld\.w lr,lr\[0x4\]
++ *[0-9a-f]*: 64 dd ld\.w sp,r2\[0x34\]
++ *[0-9a-f]*: 62 29 ld\.w r9,r1\[0x8\]
++ *[0-9a-f]*: 7a f5 ld\.w r5,sp\[0x3c\]
++
++[0-9a-f]* <ld_sh3>:
++ *[0-9a-f]*: 9e 0f ld\.sh pc,pc\[0x0\]
++ *[0-9a-f]*: 98 7c ld\.sh r12,r12\[0xe\]
++ *[0-9a-f]*: 8a 45 ld\.sh r5,r5\[0x8\]
++ *[0-9a-f]*: 88 34 ld\.sh r4,r4\[0x6\]
++ *[0-9a-f]*: 9c 1e ld\.sh lr,lr\[0x2\]
++ *[0-9a-f]*: 84 44 ld\.sh r4,r2\[0x8\]
++ *[0-9a-f]*: 9c 5d ld\.sh sp,lr\[0xa\]
++ *[0-9a-f]*: 96 12 ld\.sh r2,r11\[0x2\]
++
++[0-9a-f]* <ld_uh3>:
++ *[0-9a-f]*: 9e 8f ld\.uh pc,pc\[0x0\]
++ *[0-9a-f]*: 98 fc ld\.uh r12,r12\[0xe\]
++ *[0-9a-f]*: 8a c5 ld\.uh r5,r5\[0x8\]
++ *[0-9a-f]*: 88 b4 ld\.uh r4,r4\[0x6\]
++ *[0-9a-f]*: 9c 9e ld\.uh lr,lr\[0x2\]
++ *[0-9a-f]*: 80 da ld\.uh r10,r0\[0xa\]
++ *[0-9a-f]*: 96 c8 ld\.uh r8,r11\[0x8\]
++ *[0-9a-f]*: 84 ea ld\.uh r10,r2\[0xc\]
++
++[0-9a-f]* <st_w3>:
++ *[0-9a-f]*: 9f 0f st\.w pc\[0x0\],pc
++ *[0-9a-f]*: 99 fc st\.w r12\[0x3c\],r12
++ *[0-9a-f]*: 8b 85 st\.w r5\[0x20\],r5
++ *[0-9a-f]*: 89 74 st\.w r4\[0x1c\],r4
++ *[0-9a-f]*: 9d 1e st\.w lr\[0x4\],lr
++ *[0-9a-f]*: 8f bb st\.w r7\[0x2c\],r11
++ *[0-9a-f]*: 85 66 st\.w r2\[0x18\],r6
++ *[0-9a-f]*: 89 39 st\.w r4\[0xc\],r9
++
++[0-9a-f]* <st_h3>:
++ *[0-9a-f]*: be 0f st\.h pc\[0x0\],pc
++ *[0-9a-f]*: b8 7c st\.h r12\[0xe\],r12
++ *[0-9a-f]*: aa 45 st\.h r5\[0x8\],r5
++ *[0-9a-f]*: a8 34 st\.h r4\[0x6\],r4
++ *[0-9a-f]*: bc 1e st\.h lr\[0x2\],lr
++ *[0-9a-f]*: bc 5c st\.h lr\[0xa\],r12
++ *[0-9a-f]*: ac 20 st\.h r6\[0x4\],r0
++ *[0-9a-f]*: aa 6d st\.h r5\[0xc\],sp
++
++[0-9a-f]* <st_b3>:
++ *[0-9a-f]*: be 8f st\.b pc\[0x0\],pc
++ *[0-9a-f]*: b8 fc st\.b r12\[0x7\],r12
++ *[0-9a-f]*: aa c5 st\.b r5\[0x4\],r5
++ *[0-9a-f]*: a8 b4 st\.b r4\[0x3\],r4
++ *[0-9a-f]*: bc 9e st\.b lr\[0x1\],lr
++ *[0-9a-f]*: b8 e9 st\.b r12\[0x6\],r9
++ *[0-9a-f]*: a4 be st\.b r2\[0x3\],lr
++ *[0-9a-f]*: a2 bb st\.b r1\[0x3\],r11
++
++[0-9a-f]* <ldd>:
++ *[0-9a-f]*: bf 00 ld\.d r0,pc
++ *[0-9a-f]*: b9 0e ld\.d lr,r12
++ *[0-9a-f]*: ab 08 ld\.d r8,r5
++ *[0-9a-f]*: a9 06 ld\.d r6,r4
++ *[0-9a-f]*: bd 02 ld\.d r2,lr
++ *[0-9a-f]*: af 0e ld\.d lr,r7
++ *[0-9a-f]*: a9 04 ld\.d r4,r4
++ *[0-9a-f]*: bf 0e ld\.d lr,pc
++
++[0-9a-f]* <ldd_postinc>:
++ *[0-9a-f]*: bf 01 ld\.d r0,pc\+\+
++ *[0-9a-f]*: b9 0f ld\.d lr,r12\+\+
++ *[0-9a-f]*: ab 09 ld\.d r8,r5\+\+
++ *[0-9a-f]*: a9 07 ld\.d r6,r4\+\+
++ *[0-9a-f]*: bd 03 ld\.d r2,lr\+\+
++ *[0-9a-f]*: ab 0f ld\.d lr,r5\+\+
++ *[0-9a-f]*: b7 0d ld\.d r12,r11\+\+
++ *[0-9a-f]*: b9 03 ld\.d r2,r12\+\+
++
++[0-9a-f]* <ldd_predec>:
++ *[0-9a-f]*: bf 10 ld\.d r0,--pc
++ *[0-9a-f]*: b9 1e ld\.d lr,--r12
++ *[0-9a-f]*: ab 18 ld\.d r8,--r5
++ *[0-9a-f]*: a9 16 ld\.d r6,--r4
++ *[0-9a-f]*: bd 12 ld\.d r2,--lr
++ *[0-9a-f]*: a1 18 ld\.d r8,--r0
++ *[0-9a-f]*: bf 1a ld\.d r10,--pc
++ *[0-9a-f]*: a9 12 ld\.d r2,--r4
++
++[0-9a-f]* <std>:
++ *[0-9a-f]*: bf 11 st\.d pc,r0
++ *[0-9a-f]*: b9 1f st\.d r12,lr
++ *[0-9a-f]*: ab 19 st\.d r5,r8
++ *[0-9a-f]*: a9 17 st\.d r4,r6
++ *[0-9a-f]*: bd 13 st\.d lr,r2
++ *[0-9a-f]*: a1 1d st\.d r0,r12
++ *[0-9a-f]*: bb 15 st\.d sp,r4
++ *[0-9a-f]*: b9 1d st\.d r12,r12
++
++[0-9a-f]* <std_postinc>:
++ *[0-9a-f]*: bf 20 st\.d pc\+\+,r0
++ *[0-9a-f]*: b9 2e st\.d r12\+\+,lr
++ *[0-9a-f]*: ab 28 st\.d r5\+\+,r8
++ *[0-9a-f]*: a9 26 st\.d r4\+\+,r6
++ *[0-9a-f]*: bd 22 st\.d lr\+\+,r2
++ *[0-9a-f]*: bb 26 st\.d sp\+\+,r6
++ *[0-9a-f]*: b5 26 st\.d r10\+\+,r6
++ *[0-9a-f]*: af 22 st\.d r7\+\+,r2
++
++[0-9a-f]* <std_predec>:
++ *[0-9a-f]*: bf 21 st\.d --pc,r0
++ *[0-9a-f]*: b9 2f st\.d --r12,lr
++ *[0-9a-f]*: ab 29 st\.d --r5,r8
++ *[0-9a-f]*: a9 27 st\.d --r4,r6
++ *[0-9a-f]*: bd 23 st\.d --lr,r2
++ *[0-9a-f]*: a7 27 st\.d --r3,r6
++ *[0-9a-f]*: bd 23 st\.d --lr,r2
++ *[0-9a-f]*: a1 25 st\.d --r0,r4
++
++[0-9a-f]* <mul>:
++ *[0-9a-f]*: bf 3f mul pc,pc
++ *[0-9a-f]*: b9 3c mul r12,r12
++ *[0-9a-f]*: ab 35 mul r5,r5
++ *[0-9a-f]*: a9 34 mul r4,r4
++ *[0-9a-f]*: bd 3e mul lr,lr
++ *[0-9a-f]*: bd 3a mul r10,lr
++ *[0-9a-f]*: b1 30 mul r0,r8
++ *[0-9a-f]*: ab 38 mul r8,r5
++
++[0-9a-f]* <asr_imm5>:
++ *[0-9a-f]*: a1 4f asr pc,0x0
++ *[0-9a-f]*: bf 5c asr r12,0x1f
++ *[0-9a-f]*: b1 45 asr r5,0x10
++ *[0-9a-f]*: af 54 asr r4,0xf
++ *[0-9a-f]*: a1 5e asr lr,0x1
++ *[0-9a-f]*: b7 56 asr r6,0x17
++ *[0-9a-f]*: b3 46 asr r6,0x12
++ *[0-9a-f]*: a9 45 asr r5,0x8
++
++[0-9a-f]* <lsl_imm5>:
++ *[0-9a-f]*: a1 6f lsl pc,0x0
++ *[0-9a-f]*: bf 7c lsl r12,0x1f
++ *[0-9a-f]*: b1 65 lsl r5,0x10
++ *[0-9a-f]*: af 74 lsl r4,0xf
++ *[0-9a-f]*: a1 7e lsl lr,0x1
++ *[0-9a-f]*: ad 7c lsl r12,0xd
++ *[0-9a-f]*: b1 66 lsl r6,0x10
++ *[0-9a-f]*: b9 71 lsl r1,0x19
++
++[0-9a-f]* <lsr_imm5>:
++ *[0-9a-f]*: a1 8f lsr pc,0x0
++ *[0-9a-f]*: bf 9c lsr r12,0x1f
++ *[0-9a-f]*: b1 85 lsr r5,0x10
++ *[0-9a-f]*: af 94 lsr r4,0xf
++ *[0-9a-f]*: a1 9e lsr lr,0x1
++ *[0-9a-f]*: a1 90 lsr r0,0x1
++ *[0-9a-f]*: ab 88 lsr r8,0xa
++ *[0-9a-f]*: bb 87 lsr r7,0x1a
++
++[0-9a-f]* <sbr>:
++ *[0-9a-f]*: a1 af sbr pc,0x0
++ *[0-9a-f]*: bf bc sbr r12,0x1f
++ *[0-9a-f]*: b1 a5 sbr r5,0x10
++ *[0-9a-f]*: af b4 sbr r4,0xf
++ *[0-9a-f]*: a1 be sbr lr,0x1
++ *[0-9a-f]*: bf b8 sbr r8,0x1f
++ *[0-9a-f]*: b7 a6 sbr r6,0x16
++ *[0-9a-f]*: b7 b1 sbr r1,0x17
++
++[0-9a-f]* <cbr>:
++ *[0-9a-f]*: a1 cf cbr pc,0x0
++ *[0-9a-f]*: bf dc cbr r12,0x1f
++ *[0-9a-f]*: b1 c5 cbr r5,0x10
++ *[0-9a-f]*: af d4 cbr r4,0xf
++ *[0-9a-f]*: a1 de cbr lr,0x1
++ *[0-9a-f]*: ab cc cbr r12,0xa
++ *[0-9a-f]*: b7 c7 cbr r7,0x16
++ *[0-9a-f]*: a9 d8 cbr r8,0x9
++
++[0-9a-f]* <brc1>:
++ *[0-9a-f]*: c0 00 breq [0-9a-f]* <.*>
++ *[0-9a-f]*: cf f7 brpl [0-9a-f]* <.*>
++ *[0-9a-f]*: c8 04 brge [0-9a-f]* <.*>
++ *[0-9a-f]*: c7 f3 brcs [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 11 brne [0-9a-f]* <.*>
++ *[0-9a-f]*: c7 33 brcs [0-9a-f]* <.*>
++ *[0-9a-f]*: cf 70 breq [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 60 breq [0-9a-f]* <.*>
++
++[0-9a-f]* <rjmp>:
++ *[0-9a-f]*: c0 08 rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: cf fb rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 0a rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: cf f9 rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 18 rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: c1 fa rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 78 rjmp [0-9a-f]* <.*>
++ *[0-9a-f]*: cf ea rjmp [0-9a-f]* <.*>
++
++[0-9a-f]* <rcall1>:
++ *[0-9a-f]*: c0 0c rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: cf ff rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 0e rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: cf fd rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 1c rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: c6 cc rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: cf 7e rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: c1 ae rcall [0-9a-f]* <.*>
++
++[0-9a-f]* <acall>:
++ *[0-9a-f]*: d0 00 acall 0x0
++ *[0-9a-f]*: df f0 acall 0x3fc
++ *[0-9a-f]*: d8 00 acall 0x200
++ *[0-9a-f]*: d7 f0 acall 0x1fc
++ *[0-9a-f]*: d0 10 acall 0x4
++ *[0-9a-f]*: d5 90 acall 0x164
++ *[0-9a-f]*: d4 c0 acall 0x130
++ *[0-9a-f]*: d2 b0 acall 0xac
++
++[0-9a-f]* <scall>:
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++ *[0-9a-f]*: d7 33 scall
++
++[0-9a-f]* <popm>:
++ *[0-9a-f]*: d8 02 popm pc
++ *[0-9a-f]*: dd fa popm r0-r11,pc,r12=-1
++ *[0-9a-f]*: d4 02 popm lr
++ *[0-9a-f]*: db fa popm r0-r11,pc,r12=1
++ *[0-9a-f]*: d0 12 popm r0-r3
++ *[0-9a-f]*: d8 e2 popm r4-r10,pc
++ *[0-9a-f]*: d9 1a popm r0-r3,r11,pc,r12=0
++ *[0-9a-f]*: d7 b2 popm r0-r7,r10-r12,lr
++
++[0-9a-f]* <pushm>:
++ *[0-9a-f]*: d8 01 pushm pc
++ *[0-9a-f]*: df f1 pushm r0-r12,lr-pc
++ *[0-9a-f]*: d8 01 pushm pc
++ *[0-9a-f]*: d7 f1 pushm r0-r12,lr
++ *[0-9a-f]*: d0 11 pushm r0-r3
++ *[0-9a-f]*: dc c1 pushm r8-r10,lr-pc
++ *[0-9a-f]*: d0 91 pushm r0-r3,r10
++ *[0-9a-f]*: d2 41 pushm r8-r9,r12
++
++[0-9a-f]* <popm_n>:
++.*
++.*
++.*
++.*
++.*
++.*
++.*
++.*
++
++[0-9a-f]* <pushm_n>:
++.*
++.*
++.*
++.*
++.*
++.*
++.*
++.*
++
++[0-9a-f]* <csrfcz>:
++ *[0-9a-f]*: d0 03 csrfcz 0x0
++ *[0-9a-f]*: d1 f3 csrfcz 0x1f
++ *[0-9a-f]*: d1 03 csrfcz 0x10
++ *[0-9a-f]*: d0 f3 csrfcz 0xf
++ *[0-9a-f]*: d0 13 csrfcz 0x1
++ *[0-9a-f]*: d0 53 csrfcz 0x5
++ *[0-9a-f]*: d0 d3 csrfcz 0xd
++ *[0-9a-f]*: d1 73 csrfcz 0x17
++
++[0-9a-f]* <ssrf>:
++ *[0-9a-f]*: d2 03 ssrf 0x0
++ *[0-9a-f]*: d3 f3 ssrf 0x1f
++ *[0-9a-f]*: d3 03 ssrf 0x10
++ *[0-9a-f]*: d2 f3 ssrf 0xf
++ *[0-9a-f]*: d2 13 ssrf 0x1
++ *[0-9a-f]*: d3 d3 ssrf 0x1d
++ *[0-9a-f]*: d2 d3 ssrf 0xd
++ *[0-9a-f]*: d2 d3 ssrf 0xd
++
++[0-9a-f]* <csrf>:
++ *[0-9a-f]*: d4 03 csrf 0x0
++ *[0-9a-f]*: d5 f3 csrf 0x1f
++ *[0-9a-f]*: d5 03 csrf 0x10
++ *[0-9a-f]*: d4 f3 csrf 0xf
++ *[0-9a-f]*: d4 13 csrf 0x1
++ *[0-9a-f]*: d4 a3 csrf 0xa
++ *[0-9a-f]*: d4 f3 csrf 0xf
++ *[0-9a-f]*: d4 b3 csrf 0xb
++
++[0-9a-f]* <rete>:
++ *[0-9a-f]*: d6 03 rete
++
++[0-9a-f]* <rets>:
++ *[0-9a-f]*: d6 13 rets
++
++[0-9a-f]* <retd>:
++ *[0-9a-f]*: d6 23 retd
++
++[0-9a-f]* <retj>:
++ *[0-9a-f]*: d6 33 retj
++
++[0-9a-f]* <tlbr>:
++ *[0-9a-f]*: d6 43 tlbr
++
++[0-9a-f]* <tlbs>:
++ *[0-9a-f]*: d6 53 tlbs
++
++[0-9a-f]* <tlbw>:
++ *[0-9a-f]*: d6 63 tlbw
++
++[0-9a-f]* <breakpoint>:
++ *[0-9a-f]*: d6 73 breakpoint
++
++[0-9a-f]* <incjosp>:
++ *[0-9a-f]*: d6 83 incjosp 1
++ *[0-9a-f]*: d6 93 incjosp 2
++ *[0-9a-f]*: d6 a3 incjosp 3
++ *[0-9a-f]*: d6 b3 incjosp 4
++ *[0-9a-f]*: d6 c3 incjosp -4
++ *[0-9a-f]*: d6 d3 incjosp -3
++ *[0-9a-f]*: d6 e3 incjosp -2
++ *[0-9a-f]*: d6 f3 incjosp -1
++
++[0-9a-f]* <nop>:
++ *[0-9a-f]*: d7 03 nop
++
++[0-9a-f]* <popjc>:
++ *[0-9a-f]*: d7 13 popjc
++
++[0-9a-f]* <pushjc>:
++ *[0-9a-f]*: d7 23 pushjc
++
++[0-9a-f]* <add2>:
++ *[0-9a-f]*: fe 0f 00 0f add pc,pc,pc
++ *[0-9a-f]*: f8 0c 00 3c add r12,r12,r12<<0x3
++ *[0-9a-f]*: ea 05 00 25 add r5,r5,r5<<0x2
++ *[0-9a-f]*: e8 04 00 14 add r4,r4,r4<<0x1
++ *[0-9a-f]*: fc 0e 00 1e add lr,lr,lr<<0x1
++ *[0-9a-f]*: f8 00 00 10 add r0,r12,r0<<0x1
++ *[0-9a-f]*: f8 04 00 09 add r9,r12,r4
++ *[0-9a-f]*: f8 07 00 2c add r12,r12,r7<<0x2
++
++[0-9a-f]* <sub2>:
++ *[0-9a-f]*: fe 0f 01 0f sub pc,pc,pc
++ *[0-9a-f]*: f8 0c 01 3c sub r12,r12,r12<<0x3
++ *[0-9a-f]*: ea 05 01 25 sub r5,r5,r5<<0x2
++ *[0-9a-f]*: e8 04 01 14 sub r4,r4,r4<<0x1
++ *[0-9a-f]*: fc 0e 01 1e sub lr,lr,lr<<0x1
++ *[0-9a-f]*: e6 04 01 0d sub sp,r3,r4
++ *[0-9a-f]*: ee 03 01 03 sub r3,r7,r3
++ *[0-9a-f]*: f4 0d 01 1d sub sp,r10,sp<<0x1
++
++[0-9a-f]* <divu>:
++ *[0-9a-f]*: fe 0f 0d 0f divu pc,pc,pc
++ *[0-9a-f]*: f8 0c 0d 0c divu r12,r12,r12
++ *[0-9a-f]*: ea 05 0d 05 divu r5,r5,r5
++ *[0-9a-f]*: e8 04 0d 04 divu r4,r4,r4
++ *[0-9a-f]*: fc 0e 0d 0e divu lr,lr,lr
++ *[0-9a-f]*: e8 0f 0d 0d divu sp,r4,pc
++ *[0-9a-f]*: ea 0d 0d 05 divu r5,r5,sp
++ *[0-9a-f]*: fa 00 0d 0a divu r10,sp,r0
++
++[0-9a-f]* <addhh_w>:
++ *[0-9a-f]*: fe 0f 0e 0f addhh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 0e 3c addhh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 0e 35 addhh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 0e 04 addhh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 0e 3e addhh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: e0 03 0e 00 addhh\.w r0,r0:b,r3:b
++ *[0-9a-f]*: f8 07 0e 2e addhh\.w lr,r12:t,r7:b
++ *[0-9a-f]*: f4 02 0e 23 addhh\.w r3,r10:t,r2:b
++
++[0-9a-f]* <subhh_w>:
++ *[0-9a-f]*: fe 0f 0f 0f subhh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 0f 3c subhh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 0f 35 subhh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 0f 04 subhh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 0f 3e subhh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: e2 07 0f 2a subhh\.w r10,r1:t,r7:b
++ *[0-9a-f]*: f4 0e 0f 3f subhh\.w pc,r10:t,lr:t
++ *[0-9a-f]*: e0 0c 0f 23 subhh\.w r3,r0:t,r12:b
++
++[0-9a-f]* <adc>:
++ *[0-9a-f]*: fe 0f 00 4f adc pc,pc,pc
++ *[0-9a-f]*: f8 0c 00 4c adc r12,r12,r12
++ *[0-9a-f]*: ea 05 00 45 adc r5,r5,r5
++ *[0-9a-f]*: e8 04 00 44 adc r4,r4,r4
++ *[0-9a-f]*: fc 0e 00 4e adc lr,lr,lr
++ *[0-9a-f]*: e0 07 00 44 adc r4,r0,r7
++ *[0-9a-f]*: e8 03 00 4d adc sp,r4,r3
++ *[0-9a-f]*: f8 00 00 42 adc r2,r12,r0
++
++[0-9a-f]* <sbc>:
++ *[0-9a-f]*: fe 0f 01 4f sbc pc,pc,pc
++ *[0-9a-f]*: f8 0c 01 4c sbc r12,r12,r12
++ *[0-9a-f]*: ea 05 01 45 sbc r5,r5,r5
++ *[0-9a-f]*: e8 04 01 44 sbc r4,r4,r4
++ *[0-9a-f]*: fc 0e 01 4e sbc lr,lr,lr
++ *[0-9a-f]*: ee 09 01 46 sbc r6,r7,r9
++ *[0-9a-f]*: f0 05 01 40 sbc r0,r8,r5
++ *[0-9a-f]*: e0 04 01 41 sbc r1,r0,r4
++
++[0-9a-f]* <mul_2>:
++ *[0-9a-f]*: fe 0f 02 4f mul pc,pc,pc
++ *[0-9a-f]*: f8 0c 02 4c mul r12,r12,r12
++ *[0-9a-f]*: ea 05 02 45 mul r5,r5,r5
++ *[0-9a-f]*: e8 04 02 44 mul r4,r4,r4
++ *[0-9a-f]*: fc 0e 02 4e mul lr,lr,lr
++ *[0-9a-f]*: e0 00 02 4f mul pc,r0,r0
++ *[0-9a-f]*: fe 0e 02 48 mul r8,pc,lr
++ *[0-9a-f]*: f8 0f 02 44 mul r4,r12,pc
++
++[0-9a-f]* <mac>:
++ *[0-9a-f]*: fe 0f 03 4f mac pc,pc,pc
++ *[0-9a-f]*: f8 0c 03 4c mac r12,r12,r12
++ *[0-9a-f]*: ea 05 03 45 mac r5,r5,r5
++ *[0-9a-f]*: e8 04 03 44 mac r4,r4,r4
++ *[0-9a-f]*: fc 0e 03 4e mac lr,lr,lr
++ *[0-9a-f]*: e8 00 03 4a mac r10,r4,r0
++ *[0-9a-f]*: fc 00 03 47 mac r7,lr,r0
++ *[0-9a-f]*: f2 0c 03 42 mac r2,r9,r12
++
++[0-9a-f]* <mulsd>:
++ *[0-9a-f]*: fe 0f 04 4f muls\.d pc,pc,pc
++ *[0-9a-f]*: f8 0c 04 4c muls\.d r12,r12,r12
++ *[0-9a-f]*: ea 05 04 45 muls\.d r5,r5,r5
++ *[0-9a-f]*: e8 04 04 44 muls\.d r4,r4,r4
++ *[0-9a-f]*: fc 0e 04 4e muls\.d lr,lr,lr
++ *[0-9a-f]*: f0 0e 04 42 muls\.d r2,r8,lr
++ *[0-9a-f]*: e0 0b 04 44 muls\.d r4,r0,r11
++ *[0-9a-f]*: fc 06 04 45 muls\.d r5,lr,r6
++
++[0-9a-f]* <macsd>:
++ *[0-9a-f]*: fe 0f 05 40 macs\.d r0,pc,pc
++ *[0-9a-f]*: f8 0c 05 4e macs\.d lr,r12,r12
++ *[0-9a-f]*: ea 05 05 48 macs\.d r8,r5,r5
++ *[0-9a-f]*: e8 04 05 46 macs\.d r6,r4,r4
++ *[0-9a-f]*: fc 0e 05 42 macs\.d r2,lr,lr
++ *[0-9a-f]*: e2 09 05 48 macs\.d r8,r1,r9
++ *[0-9a-f]*: f0 08 05 4e macs\.d lr,r8,r8
++ *[0-9a-f]*: e6 0c 05 44 macs\.d r4,r3,r12
++
++[0-9a-f]* <mulud>:
++ *[0-9a-f]*: fe 0f 06 40 mulu\.d r0,pc,pc
++ *[0-9a-f]*: f8 0c 06 4e mulu\.d lr,r12,r12
++ *[0-9a-f]*: ea 05 06 48 mulu\.d r8,r5,r5
++ *[0-9a-f]*: e8 04 06 46 mulu\.d r6,r4,r4
++ *[0-9a-f]*: fc 0e 06 42 mulu\.d r2,lr,lr
++ *[0-9a-f]*: ea 00 06 46 mulu\.d r6,r5,r0
++ *[0-9a-f]*: ec 01 06 44 mulu\.d r4,r6,r1
++ *[0-9a-f]*: f0 02 06 48 mulu\.d r8,r8,r2
++
++[0-9a-f]* <macud>:
++ *[0-9a-f]*: fe 0f 07 40 macu\.d r0,pc,pc
++ *[0-9a-f]*: f8 0c 07 4e macu\.d lr,r12,r12
++ *[0-9a-f]*: ea 05 07 48 macu\.d r8,r5,r5
++ *[0-9a-f]*: e8 04 07 46 macu\.d r6,r4,r4
++ *[0-9a-f]*: fc 0e 07 42 macu\.d r2,lr,lr
++ *[0-9a-f]*: fa 0b 07 46 macu\.d r6,sp,r11
++ *[0-9a-f]*: e8 08 07 42 macu\.d r2,r4,r8
++ *[0-9a-f]*: f4 09 07 46 macu\.d r6,r10,r9
++
++[0-9a-f]* <asr_1>:
++ *[0-9a-f]*: fe 0f 08 4f asr pc,pc,pc
++ *[0-9a-f]*: f8 0c 08 4c asr r12,r12,r12
++ *[0-9a-f]*: ea 05 08 45 asr r5,r5,r5
++ *[0-9a-f]*: e8 04 08 44 asr r4,r4,r4
++ *[0-9a-f]*: fc 0e 08 4e asr lr,lr,lr
++ *[0-9a-f]*: ec 0f 08 4f asr pc,r6,pc
++ *[0-9a-f]*: ec 0c 08 40 asr r0,r6,r12
++ *[0-9a-f]*: fa 00 08 44 asr r4,sp,r0
++
++[0-9a-f]* <lsl_1>:
++ *[0-9a-f]*: fe 0f 09 4f lsl pc,pc,pc
++ *[0-9a-f]*: f8 0c 09 4c lsl r12,r12,r12
++ *[0-9a-f]*: ea 05 09 45 lsl r5,r5,r5
++ *[0-9a-f]*: e8 04 09 44 lsl r4,r4,r4
++ *[0-9a-f]*: fc 0e 09 4e lsl lr,lr,lr
++ *[0-9a-f]*: ea 0e 09 4e lsl lr,r5,lr
++ *[0-9a-f]*: fe 03 09 45 lsl r5,pc,r3
++ *[0-9a-f]*: fe 09 09 41 lsl r1,pc,r9
++
++[0-9a-f]* <lsr_1>:
++ *[0-9a-f]*: fe 0f 0a 4f lsr pc,pc,pc
++ *[0-9a-f]*: f8 0c 0a 4c lsr r12,r12,r12
++ *[0-9a-f]*: ea 05 0a 45 lsr r5,r5,r5
++ *[0-9a-f]*: e8 04 0a 44 lsr r4,r4,r4
++ *[0-9a-f]*: fc 0e 0a 4e lsr lr,lr,lr
++ *[0-9a-f]*: e8 01 0a 42 lsr r2,r4,r1
++ *[0-9a-f]*: e2 06 0a 45 lsr r5,r1,r6
++ *[0-9a-f]*: ec 07 0a 4d lsr sp,r6,r7
++
++[0-9a-f]* <xchg>:
++ *[0-9a-f]*: fe 0f 0b 4f xchg pc,pc,pc
++ *[0-9a-f]*: f8 0c 0b 4c xchg r12,r12,r12
++ *[0-9a-f]*: ea 05 0b 45 xchg r5,r5,r5
++ *[0-9a-f]*: e8 04 0b 44 xchg r4,r4,r4
++ *[0-9a-f]*: fc 0e 0b 4e xchg lr,lr,lr
++ *[0-9a-f]*: e8 0d 0b 4e xchg lr,r4,sp
++ *[0-9a-f]*: ea 0c 0b 41 xchg r1,r5,r12
++ *[0-9a-f]*: f8 00 0b 4e xchg lr,r12,r0
++
++[0-9a-f]* <max>:
++ *[0-9a-f]*: fe 0f 0c 4f max pc,pc,pc
++ *[0-9a-f]*: f8 0c 0c 4c max r12,r12,r12
++ *[0-9a-f]*: ea 05 0c 45 max r5,r5,r5
++ *[0-9a-f]*: e8 04 0c 44 max r4,r4,r4
++ *[0-9a-f]*: fc 0e 0c 4e max lr,lr,lr
++ *[0-9a-f]*: e4 0d 0c 4e max lr,r2,sp
++ *[0-9a-f]*: f4 09 0c 44 max r4,r10,r9
++ *[0-9a-f]*: f2 0e 0c 4e max lr,r9,lr
++
++[0-9a-f]* <min>:
++ *[0-9a-f]*: fe 0f 0d 4f min pc,pc,pc
++ *[0-9a-f]*: f8 0c 0d 4c min r12,r12,r12
++ *[0-9a-f]*: ea 05 0d 45 min r5,r5,r5
++ *[0-9a-f]*: e8 04 0d 44 min r4,r4,r4
++ *[0-9a-f]*: fc 0e 0d 4e min lr,lr,lr
++ *[0-9a-f]*: ee 08 0d 49 min r9,r7,r8
++ *[0-9a-f]*: ea 05 0d 4d min sp,r5,r5
++ *[0-9a-f]*: e2 04 0d 44 min r4,r1,r4
++
++[0-9a-f]* <addabs>:
++ *[0-9a-f]*: fe 0f 0e 4f addabs pc,pc,pc
++ *[0-9a-f]*: f8 0c 0e 4c addabs r12,r12,r12
++ *[0-9a-f]*: ea 05 0e 45 addabs r5,r5,r5
++ *[0-9a-f]*: e8 04 0e 44 addabs r4,r4,r4
++ *[0-9a-f]*: fc 0e 0e 4e addabs lr,lr,lr
++ *[0-9a-f]*: f4 00 0e 47 addabs r7,r10,r0
++ *[0-9a-f]*: f2 07 0e 49 addabs r9,r9,r7
++ *[0-9a-f]*: f0 0c 0e 42 addabs r2,r8,r12
++
++[0-9a-f]* <mulnhh_w>:
++ *[0-9a-f]*: fe 0f 01 8f mulnhh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 01 bc mulnhh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 01 b5 mulnhh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 01 84 mulnhh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 01 be mulnhh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: fa 09 01 ab mulnhh\.w r11,sp:t,r9:b
++ *[0-9a-f]*: e8 0e 01 9d mulnhh\.w sp,r4:b,lr:t
++ *[0-9a-f]*: e4 0b 01 ac mulnhh\.w r12,r2:t,r11:b
++
++[0-9a-f]* <mulnwh_d>:
++ *[0-9a-f]*: fe 0f 02 80 mulnwh\.d r0,pc,pc:b
++ *[0-9a-f]*: f8 0c 02 9e mulnwh\.d lr,r12,r12:t
++ *[0-9a-f]*: ea 05 02 98 mulnwh\.d r8,r5,r5:t
++ *[0-9a-f]*: e8 04 02 86 mulnwh\.d r6,r4,r4:b
++ *[0-9a-f]*: fc 0e 02 92 mulnwh\.d r2,lr,lr:t
++ *[0-9a-f]*: e6 02 02 9e mulnwh\.d lr,r3,r2:t
++ *[0-9a-f]*: ea 09 02 84 mulnwh\.d r4,r5,r9:b
++ *[0-9a-f]*: e8 04 02 9c mulnwh\.d r12,r4,r4:t
++
++[0-9a-f]* <machh_w>:
++ *[0-9a-f]*: fe 0f 04 8f machh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 04 bc machh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 04 b5 machh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 04 84 machh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 04 be machh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: ea 01 04 9e machh\.w lr,r5:b,r1:t
++ *[0-9a-f]*: ec 07 04 89 machh\.w r9,r6:b,r7:b
++ *[0-9a-f]*: fc 0c 04 a5 machh\.w r5,lr:t,r12:b
++
++[0-9a-f]* <machh_d>:
++ *[0-9a-f]*: fe 0f 05 80 machh\.d r0,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 05 be machh\.d lr,r12:t,r12:t
++ *[0-9a-f]*: ea 05 05 b8 machh\.d r8,r5:t,r5:t
++ *[0-9a-f]*: e8 04 05 86 machh\.d r6,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 05 b2 machh\.d r2,lr:t,lr:t
++ *[0-9a-f]*: e0 08 05 8a machh\.d r10,r0:b,r8:b
++ *[0-9a-f]*: e8 05 05 9e machh\.d lr,r4:b,r5:t
++ *[0-9a-f]*: e0 04 05 98 machh\.d r8,r0:b,r4:t
++
++[0-9a-f]* <macsathh_w>:
++ *[0-9a-f]*: fe 0f 06 8f macsathh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 06 bc macsathh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 06 b5 macsathh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 06 84 macsathh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 06 be macsathh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: ee 0f 06 b7 macsathh\.w r7,r7:t,pc:t
++ *[0-9a-f]*: e4 04 06 a4 macsathh\.w r4,r2:t,r4:b
++ *[0-9a-f]*: f0 03 06 b4 macsathh\.w r4,r8:t,r3:t
++
++[0-9a-f]* <mulhh_w>:
++ *[0-9a-f]*: fe 0f 07 8f mulhh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 07 bc mulhh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 07 b5 mulhh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 07 84 mulhh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 07 be mulhh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: e8 09 07 a7 mulhh\.w r7,r4:t,r9:b
++ *[0-9a-f]*: e6 07 07 bf mulhh\.w pc,r3:t,r7:t
++ *[0-9a-f]*: e8 09 07 9f mulhh\.w pc,r4:b,r9:t
++
++[0-9a-f]* <mulsathh_h>:
++ *[0-9a-f]*: fe 0f 08 8f mulsathh\.h pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 08 bc mulsathh\.h r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 08 b5 mulsathh\.h r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 08 84 mulsathh\.h r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 08 be mulsathh\.h lr,lr:t,lr:t
++ *[0-9a-f]*: e2 0d 08 83 mulsathh\.h r3,r1:b,sp:b
++ *[0-9a-f]*: fc 0b 08 ab mulsathh\.h r11,lr:t,r11:b
++ *[0-9a-f]*: f0 0b 08 98 mulsathh\.h r8,r8:b,r11:t
++
++[0-9a-f]* <mulsathh_w>:
++ *[0-9a-f]*: fe 0f 09 8f mulsathh\.w pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 09 bc mulsathh\.w r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 09 b5 mulsathh\.w r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 09 84 mulsathh\.w r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 09 be mulsathh\.w lr,lr:t,lr:t
++ *[0-9a-f]*: f6 06 09 ae mulsathh\.w lr,r11:t,r6:b
++ *[0-9a-f]*: ec 07 09 96 mulsathh\.w r6,r6:b,r7:t
++ *[0-9a-f]*: e4 03 09 8a mulsathh\.w r10,r2:b,r3:b
++
++[0-9a-f]* <mulsatrndhh_h>:
++ *[0-9a-f]*: fe 0f 0a 8f mulsatrndhh\.h pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 0a bc mulsatrndhh\.h r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 0a b5 mulsatrndhh\.h r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 0a 84 mulsatrndhh\.h r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 0a be mulsatrndhh\.h lr,lr:t,lr:t
++ *[0-9a-f]*: ec 09 0a 8b mulsatrndhh\.h r11,r6:b,r9:b
++ *[0-9a-f]*: e6 08 0a 9b mulsatrndhh\.h r11,r3:b,r8:t
++ *[0-9a-f]*: fa 07 0a b5 mulsatrndhh\.h r5,sp:t,r7:t
++
++[0-9a-f]* <mulsatrndwh_w>:
++ *[0-9a-f]*: fe 0f 0b 8f mulsatrndwh\.w pc,pc,pc:b
++ *[0-9a-f]*: f8 0c 0b 9c mulsatrndwh\.w r12,r12,r12:t
++ *[0-9a-f]*: ea 05 0b 95 mulsatrndwh\.w r5,r5,r5:t
++ *[0-9a-f]*: e8 04 0b 84 mulsatrndwh\.w r4,r4,r4:b
++ *[0-9a-f]*: fc 0e 0b 9e mulsatrndwh\.w lr,lr,lr:t
++ *[0-9a-f]*: f8 00 0b 85 mulsatrndwh\.w r5,r12,r0:b
++ *[0-9a-f]*: f4 0f 0b 87 mulsatrndwh\.w r7,r10,pc:b
++ *[0-9a-f]*: f0 05 0b 9a mulsatrndwh\.w r10,r8,r5:t
++
++[0-9a-f]* <macwh_d>:
++ *[0-9a-f]*: fe 0f 0c 80 macwh\.d r0,pc,pc:b
++ *[0-9a-f]*: f8 0c 0c 9e macwh\.d lr,r12,r12:t
++ *[0-9a-f]*: ea 05 0c 98 macwh\.d r8,r5,r5:t
++ *[0-9a-f]*: e8 04 0c 86 macwh\.d r6,r4,r4:b
++ *[0-9a-f]*: fc 0e 0c 92 macwh\.d r2,lr,lr:t
++ *[0-9a-f]*: f4 0c 0c 94 macwh\.d r4,r10,r12:t
++ *[0-9a-f]*: ee 0d 0c 84 macwh\.d r4,r7,sp:b
++ *[0-9a-f]*: f2 0b 0c 8e macwh\.d lr,r9,r11:b
++
++[0-9a-f]* <mulwh_d>:
++ *[0-9a-f]*: fe 0f 0d 80 mulwh\.d r0,pc,pc:b
++ *[0-9a-f]*: f8 0c 0d 9e mulwh\.d lr,r12,r12:t
++ *[0-9a-f]*: ea 05 0d 98 mulwh\.d r8,r5,r5:t
++ *[0-9a-f]*: e8 04 0d 86 mulwh\.d r6,r4,r4:b
++ *[0-9a-f]*: fc 0e 0d 92 mulwh\.d r2,lr,lr:t
++ *[0-9a-f]*: ea 01 0d 8c mulwh\.d r12,r5,r1:b
++ *[0-9a-f]*: e2 03 0d 90 mulwh\.d r0,r1,r3:t
++ *[0-9a-f]*: f2 02 0d 80 mulwh\.d r0,r9,r2:b
++
++[0-9a-f]* <mulsatwh_w>:
++ *[0-9a-f]*: fe 0f 0e 8f mulsatwh\.w pc,pc,pc:b
++ *[0-9a-f]*: f8 0c 0e 9c mulsatwh\.w r12,r12,r12:t
++ *[0-9a-f]*: ea 05 0e 95 mulsatwh\.w r5,r5,r5:t
++ *[0-9a-f]*: e8 04 0e 84 mulsatwh\.w r4,r4,r4:b
++ *[0-9a-f]*: fc 0e 0e 9e mulsatwh\.w lr,lr,lr:t
++ *[0-9a-f]*: fe 0a 0e 9b mulsatwh\.w r11,pc,r10:t
++ *[0-9a-f]*: f8 09 0e 9d mulsatwh\.w sp,r12,r9:t
++ *[0-9a-f]*: e6 02 0e 90 mulsatwh\.w r0,r3,r2:t
++
++[0-9a-f]* <ldw7>:
++ *[0-9a-f]*: fe 0f 0f 8f ld\.w pc,pc\[pc:b<<2\]
++ *[0-9a-f]*: f8 0c 0f bc ld\.w r12,r12\[r12:t<<2\]
++ *[0-9a-f]*: ea 05 0f a5 ld\.w r5,r5\[r5:u<<2\]
++ *[0-9a-f]*: e8 04 0f 94 ld\.w r4,r4\[r4:l<<2\]
++ *[0-9a-f]*: fc 0e 0f 9e ld\.w lr,lr\[lr:l<<2\]
++ *[0-9a-f]*: f4 06 0f 99 ld\.w r9,r10\[r6:l<<2\]
++ *[0-9a-f]*: f4 0a 0f 82 ld\.w r2,r10\[r10:b<<2\]
++ *[0-9a-f]*: ea 0f 0f 8b ld\.w r11,r5\[pc:b<<2\]
++
++[0-9a-f]* <satadd_w>:
++ *[0-9a-f]*: fe 0f 00 cf satadd\.w pc,pc,pc
++ *[0-9a-f]*: f8 0c 00 cc satadd\.w r12,r12,r12
++ *[0-9a-f]*: ea 05 00 c5 satadd\.w r5,r5,r5
++ *[0-9a-f]*: e8 04 00 c4 satadd\.w r4,r4,r4
++ *[0-9a-f]*: fc 0e 00 ce satadd\.w lr,lr,lr
++ *[0-9a-f]*: f0 0b 00 c4 satadd\.w r4,r8,r11
++ *[0-9a-f]*: f8 06 00 c3 satadd\.w r3,r12,r6
++ *[0-9a-f]*: fc 09 00 c3 satadd\.w r3,lr,r9
++
++[0-9a-f]* <satsub_w1>:
++ *[0-9a-f]*: fe 0f 01 cf satsub\.w pc,pc,pc
++ *[0-9a-f]*: f8 0c 01 cc satsub\.w r12,r12,r12
++ *[0-9a-f]*: ea 05 01 c5 satsub\.w r5,r5,r5
++ *[0-9a-f]*: e8 04 01 c4 satsub\.w r4,r4,r4
++ *[0-9a-f]*: fc 0e 01 ce satsub\.w lr,lr,lr
++ *[0-9a-f]*: fa 00 01 c8 satsub\.w r8,sp,r0
++ *[0-9a-f]*: f0 04 01 c9 satsub\.w r9,r8,r4
++ *[0-9a-f]*: fc 02 01 cf satsub\.w pc,lr,r2
++
++[0-9a-f]* <satadd_h>:
++ *[0-9a-f]*: fe 0f 02 cf satadd\.h pc,pc,pc
++ *[0-9a-f]*: f8 0c 02 cc satadd\.h r12,r12,r12
++ *[0-9a-f]*: ea 05 02 c5 satadd\.h r5,r5,r5
++ *[0-9a-f]*: e8 04 02 c4 satadd\.h r4,r4,r4
++ *[0-9a-f]*: fc 0e 02 ce satadd\.h lr,lr,lr
++ *[0-9a-f]*: e6 09 02 c7 satadd\.h r7,r3,r9
++ *[0-9a-f]*: e0 02 02 c1 satadd\.h r1,r0,r2
++ *[0-9a-f]*: e8 0e 02 c1 satadd\.h r1,r4,lr
++
++[0-9a-f]* <satsub_h>:
++ *[0-9a-f]*: fe 0f 03 cf satsub\.h pc,pc,pc
++ *[0-9a-f]*: f8 0c 03 cc satsub\.h r12,r12,r12
++ *[0-9a-f]*: ea 05 03 c5 satsub\.h r5,r5,r5
++ *[0-9a-f]*: e8 04 03 c4 satsub\.h r4,r4,r4
++ *[0-9a-f]*: fc 0e 03 ce satsub\.h lr,lr,lr
++ *[0-9a-f]*: fc 03 03 ce satsub\.h lr,lr,r3
++ *[0-9a-f]*: ec 05 03 cb satsub\.h r11,r6,r5
++ *[0-9a-f]*: fa 00 03 c3 satsub\.h r3,sp,r0
++
++[0-9a-f]* <mul3>:
++ *[0-9a-f]*: fe 0f 10 00 mul pc,pc,0
++ *[0-9a-f]*: f8 0c 10 ff mul r12,r12,-1
++ *[0-9a-f]*: ea 05 10 80 mul r5,r5,-128
++ *[0-9a-f]*: e8 04 10 7f mul r4,r4,127
++ *[0-9a-f]*: fc 0e 10 01 mul lr,lr,1
++ *[0-9a-f]*: e4 0c 10 f9 mul r12,r2,-7
++ *[0-9a-f]*: fe 01 10 5f mul r1,pc,95
++ *[0-9a-f]*: ec 04 10 13 mul r4,r6,19
++
++[0-9a-f]* <rsub2>:
++ *[0-9a-f]*: fe 0f 11 00 rsub pc,pc,0
++ *[0-9a-f]*: f8 0c 11 ff rsub r12,r12,-1
++ *[0-9a-f]*: ea 05 11 80 rsub r5,r5,-128
++ *[0-9a-f]*: e8 04 11 7f rsub r4,r4,127
++ *[0-9a-f]*: fc 0e 11 01 rsub lr,lr,1
++ *[0-9a-f]*: fc 09 11 60 rsub r9,lr,96
++ *[0-9a-f]*: e2 0b 11 38 rsub r11,r1,56
++ *[0-9a-f]*: ee 00 11 a9 rsub r0,r7,-87
++
++[0-9a-f]* <clz>:
++ *[0-9a-f]*: fe 0f 12 00 clz pc,pc
++ *[0-9a-f]*: f8 0c 12 00 clz r12,r12
++ *[0-9a-f]*: ea 05 12 00 clz r5,r5
++ *[0-9a-f]*: e8 04 12 00 clz r4,r4
++ *[0-9a-f]*: fc 0e 12 00 clz lr,lr
++ *[0-9a-f]*: e6 02 12 00 clz r2,r3
++ *[0-9a-f]*: f6 05 12 00 clz r5,r11
++ *[0-9a-f]*: e6 0f 12 00 clz pc,r3
++
++[0-9a-f]* <cpc1>:
++ *[0-9a-f]*: fe 0f 13 00 cpc pc,pc
++ *[0-9a-f]*: f8 0c 13 00 cpc r12,r12
++ *[0-9a-f]*: ea 05 13 00 cpc r5,r5
++ *[0-9a-f]*: e8 04 13 00 cpc r4,r4
++ *[0-9a-f]*: fc 0e 13 00 cpc lr,lr
++ *[0-9a-f]*: e8 0f 13 00 cpc pc,r4
++ *[0-9a-f]*: f2 05 13 00 cpc r5,r9
++ *[0-9a-f]*: ee 06 13 00 cpc r6,r7
++
++[0-9a-f]* <asr3>:
++ *[0-9a-f]*: fe 0f 14 00 asr pc,pc,0x0
++ *[0-9a-f]*: f8 0c 14 1f asr r12,r12,0x1f
++ *[0-9a-f]*: ea 05 14 10 asr r5,r5,0x10
++ *[0-9a-f]*: e8 04 14 0f asr r4,r4,0xf
++ *[0-9a-f]*: fc 0e 14 01 asr lr,lr,0x1
++ *[0-9a-f]*: f6 04 14 13 asr r4,r11,0x13
++ *[0-9a-f]*: fe 0d 14 1a asr sp,pc,0x1a
++ *[0-9a-f]*: fa 0b 14 08 asr r11,sp,0x8
++
++[0-9a-f]* <lsl3>:
++ *[0-9a-f]*: fe 0f 15 00 lsl pc,pc,0x0
++ *[0-9a-f]*: f8 0c 15 1f lsl r12,r12,0x1f
++ *[0-9a-f]*: ea 05 15 10 lsl r5,r5,0x10
++ *[0-9a-f]*: e8 04 15 0f lsl r4,r4,0xf
++ *[0-9a-f]*: fc 0e 15 01 lsl lr,lr,0x1
++ *[0-9a-f]*: f4 08 15 11 lsl r8,r10,0x11
++ *[0-9a-f]*: fc 02 15 03 lsl r2,lr,0x3
++ *[0-9a-f]*: f6 0e 15 0e lsl lr,r11,0xe
++
++[0-9a-f]* <lsr3>:
++ *[0-9a-f]*: fe 0f 16 00 lsr pc,pc,0x0
++ *[0-9a-f]*: f8 0c 16 1f lsr r12,r12,0x1f
++ *[0-9a-f]*: ea 05 16 10 lsr r5,r5,0x10
++ *[0-9a-f]*: e8 04 16 0f lsr r4,r4,0xf
++ *[0-9a-f]*: fc 0e 16 01 lsr lr,lr,0x1
++ *[0-9a-f]*: e6 04 16 1f lsr r4,r3,0x1f
++ *[0-9a-f]*: f2 0f 16 0e lsr pc,r9,0xe
++ *[0-9a-f]*: e0 03 16 06 lsr r3,r0,0x6
++
++[0-9a-f]* <movc1>:
++ *[0-9a-f]*: fe 0f 17 00 moveq pc,pc
++ *[0-9a-f]*: f8 0c 17 f0 moval r12,r12
++ *[0-9a-f]*: ea 05 17 80 movls r5,r5
++ *[0-9a-f]*: e8 04 17 70 movpl r4,r4
++ *[0-9a-f]*: fc 0e 17 10 movne lr,lr
++ *[0-9a-f]*: f6 0f 17 10 movne pc,r11
++ *[0-9a-f]*: e4 0a 17 60 movmi r10,r2
++ *[0-9a-f]*: f8 08 17 80 movls r8,r12
++
++[0-9a-f]* <padd_h>:
++ *[0-9a-f]*: fe 0f 20 0f padd\.h pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 0c padd\.h r12,r12,r12
++ *[0-9a-f]*: ea 05 20 05 padd\.h r5,r5,r5
++ *[0-9a-f]*: e8 04 20 04 padd\.h r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 0e padd\.h lr,lr,lr
++ *[0-9a-f]*: e4 07 20 08 padd\.h r8,r2,r7
++ *[0-9a-f]*: e0 03 20 00 padd\.h r0,r0,r3
++ *[0-9a-f]*: f6 06 20 0d padd\.h sp,r11,r6
++
++[0-9a-f]* <psub_h>:
++ *[0-9a-f]*: fe 0f 20 1f psub\.h pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 1c psub\.h r12,r12,r12
++ *[0-9a-f]*: ea 05 20 15 psub\.h r5,r5,r5
++ *[0-9a-f]*: e8 04 20 14 psub\.h r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 1e psub\.h lr,lr,lr
++ *[0-9a-f]*: ec 08 20 1e psub\.h lr,r6,r8
++ *[0-9a-f]*: e2 0d 20 10 psub\.h r0,r1,sp
++ *[0-9a-f]*: fe 0d 20 1f psub\.h pc,pc,sp
++
++[0-9a-f]* <paddx_h>:
++ *[0-9a-f]*: fe 0f 20 2f paddx\.h pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 2c paddx\.h r12,r12,r12
++ *[0-9a-f]*: ea 05 20 25 paddx\.h r5,r5,r5
++ *[0-9a-f]*: e8 04 20 24 paddx\.h r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 2e paddx\.h lr,lr,lr
++ *[0-9a-f]*: fe 01 20 2f paddx\.h pc,pc,r1
++ *[0-9a-f]*: e8 05 20 2a paddx\.h r10,r4,r5
++ *[0-9a-f]*: fe 02 20 25 paddx\.h r5,pc,r2
++
++[0-9a-f]* <psubx_h>:
++ *[0-9a-f]*: fe 0f 20 3f psubx\.h pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 3c psubx\.h r12,r12,r12
++ *[0-9a-f]*: ea 05 20 35 psubx\.h r5,r5,r5
++ *[0-9a-f]*: e8 04 20 34 psubx\.h r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 3e psubx\.h lr,lr,lr
++ *[0-9a-f]*: f8 05 20 35 psubx\.h r5,r12,r5
++ *[0-9a-f]*: f0 03 20 33 psubx\.h r3,r8,r3
++ *[0-9a-f]*: e4 03 20 35 psubx\.h r5,r2,r3
++
++[0-9a-f]* <padds_sh>:
++ *[0-9a-f]*: fe 0f 20 4f padds\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 4c padds\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 45 padds\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 44 padds\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 4e padds\.sh lr,lr,lr
++ *[0-9a-f]*: fc 02 20 49 padds\.sh r9,lr,r2
++ *[0-9a-f]*: f0 01 20 46 padds\.sh r6,r8,r1
++ *[0-9a-f]*: e8 0a 20 46 padds\.sh r6,r4,r10
++
++[0-9a-f]* <psubs_sh>:
++ *[0-9a-f]*: fe 0f 20 5f psubs\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 5c psubs\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 55 psubs\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 54 psubs\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 5e psubs\.sh lr,lr,lr
++ *[0-9a-f]*: fc 0b 20 56 psubs\.sh r6,lr,r11
++ *[0-9a-f]*: f8 04 20 52 psubs\.sh r2,r12,r4
++ *[0-9a-f]*: f2 00 20 50 psubs\.sh r0,r9,r0
++
++[0-9a-f]* <paddxs_sh>:
++ *[0-9a-f]*: fe 0f 20 6f paddxs\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 6c paddxs\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 65 paddxs\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 64 paddxs\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 6e paddxs\.sh lr,lr,lr
++ *[0-9a-f]*: e6 09 20 60 paddxs\.sh r0,r3,r9
++ *[0-9a-f]*: f4 0b 20 6f paddxs\.sh pc,r10,r11
++ *[0-9a-f]*: f4 0f 20 6f paddxs\.sh pc,r10,pc
++
++[0-9a-f]* <psubxs_sh>:
++ *[0-9a-f]*: fe 0f 20 7f psubxs\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 7c psubxs\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 75 psubxs\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 74 psubxs\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 7e psubxs\.sh lr,lr,lr
++ *[0-9a-f]*: e8 04 20 77 psubxs\.sh r7,r4,r4
++ *[0-9a-f]*: f0 03 20 77 psubxs\.sh r7,r8,r3
++ *[0-9a-f]*: ec 05 20 7f psubxs\.sh pc,r6,r5
++
++[0-9a-f]* <padds_uh>:
++ *[0-9a-f]*: fe 0f 20 8f padds\.uh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 8c padds\.uh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 85 padds\.uh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 84 padds\.uh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 8e padds\.uh lr,lr,lr
++ *[0-9a-f]*: f6 07 20 8c padds\.uh r12,r11,r7
++ *[0-9a-f]*: f0 0e 20 87 padds\.uh r7,r8,lr
++ *[0-9a-f]*: f2 07 20 86 padds\.uh r6,r9,r7
++
++[0-9a-f]* <psubs_uh>:
++ *[0-9a-f]*: fe 0f 20 9f psubs\.uh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 9c psubs\.uh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 95 psubs\.uh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 94 psubs\.uh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 9e psubs\.uh lr,lr,lr
++ *[0-9a-f]*: f4 06 20 9e psubs\.uh lr,r10,r6
++ *[0-9a-f]*: e4 0f 20 9d psubs\.uh sp,r2,pc
++ *[0-9a-f]*: f2 02 20 92 psubs\.uh r2,r9,r2
++
++[0-9a-f]* <paddxs_uh>:
++ *[0-9a-f]*: fe 0f 20 af paddxs\.uh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 ac paddxs\.uh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 a5 paddxs\.uh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 a4 paddxs\.uh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 ae paddxs\.uh lr,lr,lr
++ *[0-9a-f]*: f2 05 20 a7 paddxs\.uh r7,r9,r5
++ *[0-9a-f]*: e2 04 20 a9 paddxs\.uh r9,r1,r4
++ *[0-9a-f]*: e4 03 20 a5 paddxs\.uh r5,r2,r3
++
++[0-9a-f]* <psubxs_uh>:
++ *[0-9a-f]*: fe 0f 20 bf psubxs\.uh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 bc psubxs\.uh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 b5 psubxs\.uh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 b4 psubxs\.uh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 be psubxs\.uh lr,lr,lr
++ *[0-9a-f]*: ea 0d 20 bd psubxs\.uh sp,r5,sp
++ *[0-9a-f]*: ec 06 20 bd psubxs\.uh sp,r6,r6
++ *[0-9a-f]*: f6 08 20 b3 psubxs\.uh r3,r11,r8
++
++[0-9a-f]* <paddh_sh>:
++ *[0-9a-f]*: fe 0f 20 cf paddh\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 cc paddh\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 c5 paddh\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 c4 paddh\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 ce paddh\.sh lr,lr,lr
++ *[0-9a-f]*: fa 03 20 cc paddh\.sh r12,sp,r3
++ *[0-9a-f]*: ea 03 20 cf paddh\.sh pc,r5,r3
++ *[0-9a-f]*: f0 0d 20 c8 paddh\.sh r8,r8,sp
++
++[0-9a-f]* <psubh_sh>:
++ *[0-9a-f]*: fe 0f 20 df psubh\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 dc psubh\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 d5 psubh\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 d4 psubh\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 de psubh\.sh lr,lr,lr
++ *[0-9a-f]*: ea 08 20 d1 psubh\.sh r1,r5,r8
++ *[0-9a-f]*: e6 06 20 d7 psubh\.sh r7,r3,r6
++ *[0-9a-f]*: e6 03 20 d4 psubh\.sh r4,r3,r3
++
++[0-9a-f]* <paddxh_sh>:
++ *[0-9a-f]*: fe 0f 20 ef paddxh\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 ec paddxh\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 e5 paddxh\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 e4 paddxh\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 ee paddxh\.sh lr,lr,lr
++ *[0-9a-f]*: e0 04 20 e6 paddxh\.sh r6,r0,r4
++ *[0-9a-f]*: f0 09 20 e9 paddxh\.sh r9,r8,r9
++ *[0-9a-f]*: e0 0d 20 e3 paddxh\.sh r3,r0,sp
++
++[0-9a-f]* <psubxh_sh>:
++ *[0-9a-f]*: fe 0f 20 ff psubxh\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 20 fc psubxh\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 20 f5 psubxh\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 20 f4 psubxh\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 20 fe psubxh\.sh lr,lr,lr
++ *[0-9a-f]*: fe 0c 20 f4 psubxh\.sh r4,pc,r12
++ *[0-9a-f]*: e8 06 20 f8 psubxh\.sh r8,r4,r6
++ *[0-9a-f]*: f2 04 20 fc psubxh\.sh r12,r9,r4
++
++[0-9a-f]* <paddsub_h>:
++ *[0-9a-f]*: fe 0f 21 0f paddsub\.h pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 21 3c paddsub\.h r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 21 35 paddsub\.h r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 21 04 paddsub\.h r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 21 3e paddsub\.h lr,lr:t,lr:t
++ *[0-9a-f]*: e4 0e 21 25 paddsub\.h r5,r2:t,lr:b
++ *[0-9a-f]*: e2 08 21 07 paddsub\.h r7,r1:b,r8:b
++ *[0-9a-f]*: f4 05 21 36 paddsub\.h r6,r10:t,r5:t
++
++[0-9a-f]* <psubadd_h>:
++ *[0-9a-f]*: fe 0f 21 4f psubadd\.h pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 21 7c psubadd\.h r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 21 75 psubadd\.h r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 21 44 psubadd\.h r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 21 7e psubadd\.h lr,lr:t,lr:t
++ *[0-9a-f]*: f6 08 21 79 psubadd\.h r9,r11:t,r8:t
++ *[0-9a-f]*: ee 0e 21 7a psubadd\.h r10,r7:t,lr:t
++ *[0-9a-f]*: fe 0f 21 66 psubadd\.h r6,pc:t,pc:b
++
++[0-9a-f]* <paddsubs_sh>:
++ *[0-9a-f]*: fe 0f 21 8f paddsubs\.sh pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 21 bc paddsubs\.sh r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 21 b5 paddsubs\.sh r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 21 84 paddsubs\.sh r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 21 be paddsubs\.sh lr,lr:t,lr:t
++ *[0-9a-f]*: fc 00 21 a0 paddsubs\.sh r0,lr:t,r0:b
++ *[0-9a-f]*: e4 04 21 b9 paddsubs\.sh r9,r2:t,r4:t
++ *[0-9a-f]*: f2 0d 21 bc paddsubs\.sh r12,r9:t,sp:t
++
++[0-9a-f]* <psubadds_sh>:
++ *[0-9a-f]*: fe 0f 21 cf psubadds\.sh pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 21 fc psubadds\.sh r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 21 f5 psubadds\.sh r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 21 c4 psubadds\.sh r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 21 fe psubadds\.sh lr,lr:t,lr:t
++ *[0-9a-f]*: fc 01 21 df psubadds\.sh pc,lr:b,r1:t
++ *[0-9a-f]*: e6 0c 21 cb psubadds\.sh r11,r3:b,r12:b
++ *[0-9a-f]*: e4 08 21 fa psubadds\.sh r10,r2:t,r8:t
++
++[0-9a-f]* <paddsubs_uh>:
++ *[0-9a-f]*: fe 0f 22 0f paddsubs\.uh pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 22 3c paddsubs\.uh r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 22 35 paddsubs\.uh r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 22 04 paddsubs\.uh r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 22 3e paddsubs\.uh lr,lr:t,lr:t
++ *[0-9a-f]*: e4 03 22 09 paddsubs\.uh r9,r2:b,r3:b
++ *[0-9a-f]*: fa 07 22 1d paddsubs\.uh sp,sp:b,r7:t
++ *[0-9a-f]*: e0 0a 22 1e paddsubs\.uh lr,r0:b,r10:t
++
++[0-9a-f]* <psubadds_uh>:
++ *[0-9a-f]*: fe 0f 22 4f psubadds\.uh pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 22 7c psubadds\.uh r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 22 75 psubadds\.uh r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 22 44 psubadds\.uh r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 22 7e psubadds\.uh lr,lr:t,lr:t
++ *[0-9a-f]*: f2 0f 22 7c psubadds\.uh r12,r9:t,pc:t
++ *[0-9a-f]*: ec 08 22 48 psubadds\.uh r8,r6:b,r8:b
++ *[0-9a-f]*: f0 04 22 48 psubadds\.uh r8,r8:b,r4:b
++
++[0-9a-f]* <paddsubh_sh>:
++ *[0-9a-f]*: fe 0f 22 8f paddsubh\.sh pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 22 bc paddsubh\.sh r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 22 b5 paddsubh\.sh r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 22 84 paddsubh\.sh r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 22 be paddsubh\.sh lr,lr:t,lr:t
++ *[0-9a-f]*: f2 09 22 a8 paddsubh\.sh r8,r9:t,r9:b
++ *[0-9a-f]*: fa 01 22 b0 paddsubh\.sh r0,sp:t,r1:t
++ *[0-9a-f]*: e2 00 22 93 paddsubh\.sh r3,r1:b,r0:t
++
++[0-9a-f]* <psubaddh_sh>:
++ *[0-9a-f]*: fe 0f 22 cf psubaddh\.sh pc,pc:b,pc:b
++ *[0-9a-f]*: f8 0c 22 fc psubaddh\.sh r12,r12:t,r12:t
++ *[0-9a-f]*: ea 05 22 f5 psubaddh\.sh r5,r5:t,r5:t
++ *[0-9a-f]*: e8 04 22 c4 psubaddh\.sh r4,r4:b,r4:b
++ *[0-9a-f]*: fc 0e 22 fe psubaddh\.sh lr,lr:t,lr:t
++ *[0-9a-f]*: e6 0a 22 e7 psubaddh\.sh r7,r3:t,r10:b
++ *[0-9a-f]*: e4 01 22 f7 psubaddh\.sh r7,r2:t,r1:t
++ *[0-9a-f]*: e6 06 22 cb psubaddh\.sh r11,r3:b,r6:b
++
++[0-9a-f]* <padd_b>:
++ *[0-9a-f]*: fe 0f 23 0f padd\.b pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 0c padd\.b r12,r12,r12
++ *[0-9a-f]*: ea 05 23 05 padd\.b r5,r5,r5
++ *[0-9a-f]*: e8 04 23 04 padd\.b r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 0e padd\.b lr,lr,lr
++ *[0-9a-f]*: ec 0f 23 02 padd\.b r2,r6,pc
++ *[0-9a-f]*: f2 0c 23 08 padd\.b r8,r9,r12
++ *[0-9a-f]*: f8 03 23 05 padd\.b r5,r12,r3
++
++[0-9a-f]* <psub_b>:
++ *[0-9a-f]*: fe 0f 23 1f psub\.b pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 1c psub\.b r12,r12,r12
++ *[0-9a-f]*: ea 05 23 15 psub\.b r5,r5,r5
++ *[0-9a-f]*: e8 04 23 14 psub\.b r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 1e psub\.b lr,lr,lr
++ *[0-9a-f]*: f8 0f 23 10 psub\.b r0,r12,pc
++ *[0-9a-f]*: fa 0a 23 17 psub\.b r7,sp,r10
++ *[0-9a-f]*: fa 0c 23 15 psub\.b r5,sp,r12
++
++[0-9a-f]* <padds_sb>:
++ *[0-9a-f]*: fe 0f 23 2f padds\.sb pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 2c padds\.sb r12,r12,r12
++ *[0-9a-f]*: ea 05 23 25 padds\.sb r5,r5,r5
++ *[0-9a-f]*: e8 04 23 24 padds\.sb r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 2e padds\.sb lr,lr,lr
++ *[0-9a-f]*: f6 04 23 2d padds\.sb sp,r11,r4
++ *[0-9a-f]*: f4 0b 23 2b padds\.sb r11,r10,r11
++ *[0-9a-f]*: f8 06 23 25 padds\.sb r5,r12,r6
++
++[0-9a-f]* <psubs_sb>:
++ *[0-9a-f]*: fe 0f 23 3f psubs\.sb pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 3c psubs\.sb r12,r12,r12
++ *[0-9a-f]*: ea 05 23 35 psubs\.sb r5,r5,r5
++ *[0-9a-f]*: e8 04 23 34 psubs\.sb r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 3e psubs\.sb lr,lr,lr
++ *[0-9a-f]*: ec 08 23 37 psubs\.sb r7,r6,r8
++ *[0-9a-f]*: f4 09 23 3c psubs\.sb r12,r10,r9
++ *[0-9a-f]*: f6 00 23 3f psubs\.sb pc,r11,r0
++
++[0-9a-f]* <padds_ub>:
++ *[0-9a-f]*: fe 0f 23 4f padds\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 4c padds\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 45 padds\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 44 padds\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 4e padds\.ub lr,lr,lr
++ *[0-9a-f]*: e4 0b 23 43 padds\.ub r3,r2,r11
++ *[0-9a-f]*: f0 01 23 4a padds\.ub r10,r8,r1
++ *[0-9a-f]*: f0 0a 23 4b padds\.ub r11,r8,r10
++
++[0-9a-f]* <psubs_ub>:
++ *[0-9a-f]*: fe 0f 23 5f psubs\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 5c psubs\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 55 psubs\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 54 psubs\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 5e psubs\.ub lr,lr,lr
++ *[0-9a-f]*: e4 07 23 50 psubs\.ub r0,r2,r7
++ *[0-9a-f]*: ea 03 23 5e psubs\.ub lr,r5,r3
++ *[0-9a-f]*: ee 09 23 56 psubs\.ub r6,r7,r9
++
++[0-9a-f]* <paddh_ub>:
++ *[0-9a-f]*: fe 0f 23 6f paddh\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 6c paddh\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 65 paddh\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 64 paddh\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 6e paddh\.ub lr,lr,lr
++ *[0-9a-f]*: e2 00 23 6e paddh\.ub lr,r1,r0
++ *[0-9a-f]*: ee 07 23 62 paddh\.ub r2,r7,r7
++ *[0-9a-f]*: e2 02 23 62 paddh\.ub r2,r1,r2
++
++[0-9a-f]* <psubh_ub>:
++ *[0-9a-f]*: fe 0f 23 7f psubh\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 7c psubh\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 75 psubh\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 74 psubh\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 7e psubh\.ub lr,lr,lr
++ *[0-9a-f]*: e2 06 23 70 psubh\.ub r0,r1,r6
++ *[0-9a-f]*: fc 0a 23 74 psubh\.ub r4,lr,r10
++ *[0-9a-f]*: f0 01 23 79 psubh\.ub r9,r8,r1
++
++[0-9a-f]* <pmax_ub>:
++ *[0-9a-f]*: fe 0f 23 8f pmax\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 8c pmax\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 85 pmax\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 84 pmax\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 8e pmax\.ub lr,lr,lr
++ *[0-9a-f]*: e4 0b 23 8f pmax\.ub pc,r2,r11
++ *[0-9a-f]*: e2 01 23 8c pmax\.ub r12,r1,r1
++ *[0-9a-f]*: e4 00 23 85 pmax\.ub r5,r2,r0
++
++[0-9a-f]* <pmax_sh>:
++ *[0-9a-f]*: fe 0f 23 9f pmax\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 9c pmax\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 23 95 pmax\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 23 94 pmax\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 9e pmax\.sh lr,lr,lr
++ *[0-9a-f]*: ec 0c 23 9e pmax\.sh lr,r6,r12
++ *[0-9a-f]*: fe 05 23 92 pmax\.sh r2,pc,r5
++ *[0-9a-f]*: e4 07 23 9f pmax\.sh pc,r2,r7
++
++[0-9a-f]* <pmin_ub>:
++ *[0-9a-f]*: fe 0f 23 af pmin\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 ac pmin\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 a5 pmin\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 a4 pmin\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 ae pmin\.ub lr,lr,lr
++ *[0-9a-f]*: e2 05 23 a8 pmin\.ub r8,r1,r5
++ *[0-9a-f]*: f0 03 23 a1 pmin\.ub r1,r8,r3
++ *[0-9a-f]*: e4 07 23 a0 pmin\.ub r0,r2,r7
++
++[0-9a-f]* <pmin_sh>:
++ *[0-9a-f]*: fe 0f 23 bf pmin\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 bc pmin\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 23 b5 pmin\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 23 b4 pmin\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 be pmin\.sh lr,lr,lr
++ *[0-9a-f]*: e8 0a 23 b8 pmin\.sh r8,r4,r10
++ *[0-9a-f]*: f4 0c 23 be pmin\.sh lr,r10,r12
++ *[0-9a-f]*: ec 02 23 b2 pmin\.sh r2,r6,r2
++
++[0-9a-f]* <pavg_ub>:
++ *[0-9a-f]*: fe 0f 23 cf pavg\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 cc pavg\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 23 c5 pavg\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 23 c4 pavg\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 ce pavg\.ub lr,lr,lr
++ *[0-9a-f]*: e2 06 23 c0 pavg\.ub r0,r1,r6
++ *[0-9a-f]*: e6 06 23 c8 pavg\.ub r8,r3,r6
++ *[0-9a-f]*: f8 0a 23 cf pavg\.ub pc,r12,r10
++
++[0-9a-f]* <pavg_sh>:
++ *[0-9a-f]*: fe 0f 23 df pavg\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 23 dc pavg\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 23 d5 pavg\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 23 d4 pavg\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 23 de pavg\.sh lr,lr,lr
++ *[0-9a-f]*: fe 0d 23 d9 pavg\.sh r9,pc,sp
++ *[0-9a-f]*: fa 03 23 df pavg\.sh pc,sp,r3
++ *[0-9a-f]*: e2 09 23 d6 pavg\.sh r6,r1,r9
++
++[0-9a-f]* <pabs_sb>:
++ *[0-9a-f]*: e0 0f 23 ef pabs\.sb pc,pc
++ *[0-9a-f]*: e0 0c 23 ec pabs\.sb r12,r12
++ *[0-9a-f]*: e0 05 23 e5 pabs\.sb r5,r5
++ *[0-9a-f]*: e0 04 23 e4 pabs\.sb r4,r4
++ *[0-9a-f]*: e0 0e 23 ee pabs\.sb lr,lr
++ *[0-9a-f]*: e0 06 23 eb pabs\.sb r11,r6
++ *[0-9a-f]*: e0 09 23 ee pabs\.sb lr,r9
++ *[0-9a-f]*: e0 07 23 ed pabs\.sb sp,r7
++
++[0-9a-f]* <pabs_sh>:
++ *[0-9a-f]*: e0 0f 23 ff pabs\.sh pc,pc
++ *[0-9a-f]*: e0 0c 23 fc pabs\.sh r12,r12
++ *[0-9a-f]*: e0 05 23 f5 pabs\.sh r5,r5
++ *[0-9a-f]*: e0 04 23 f4 pabs\.sh r4,r4
++ *[0-9a-f]*: e0 0e 23 fe pabs\.sh lr,lr
++ *[0-9a-f]*: e0 03 23 ff pabs\.sh pc,r3
++ *[0-9a-f]*: e0 07 23 f5 pabs\.sh r5,r7
++ *[0-9a-f]*: e0 00 23 f4 pabs\.sh r4,r0
++
++[0-9a-f]* <psad>:
++ *[0-9a-f]*: fe 0f 24 0f psad pc,pc,pc
++ *[0-9a-f]*: f8 0c 24 0c psad r12,r12,r12
++ *[0-9a-f]*: ea 05 24 05 psad r5,r5,r5
++ *[0-9a-f]*: e8 04 24 04 psad r4,r4,r4
++ *[0-9a-f]*: fc 0e 24 0e psad lr,lr,lr
++ *[0-9a-f]*: f6 0b 24 09 psad r9,r11,r11
++ *[0-9a-f]*: e8 0d 24 0e psad lr,r4,sp
++ *[0-9a-f]*: e8 05 24 0e psad lr,r4,r5
++
++[0-9a-f]* <pasr_b>:
++ *[0-9a-f]*: fe 00 24 1f pasr\.b pc,pc,0x0
++ *[0-9a-f]*: f8 07 24 1c pasr\.b r12,r12,0x7
++ *[0-9a-f]*: ea 04 24 15 pasr\.b r5,r5,0x4
++ *[0-9a-f]*: e8 03 24 14 pasr\.b r4,r4,0x3
++ *[0-9a-f]*: fc 01 24 1e pasr\.b lr,lr,0x1
++ *[0-9a-f]*: ee 01 24 1f pasr\.b pc,r7,0x1
++ *[0-9a-f]*: fc 06 24 1d pasr\.b sp,lr,0x6
++ *[0-9a-f]*: e6 02 24 1d pasr\.b sp,r3,0x2
++
++[0-9a-f]* <plsl_b>:
++ *[0-9a-f]*: fe 00 24 2f plsl\.b pc,pc,0x0
++ *[0-9a-f]*: f8 07 24 2c plsl\.b r12,r12,0x7
++ *[0-9a-f]*: ea 04 24 25 plsl\.b r5,r5,0x4
++ *[0-9a-f]*: e8 03 24 24 plsl\.b r4,r4,0x3
++ *[0-9a-f]*: fc 01 24 2e plsl\.b lr,lr,0x1
++ *[0-9a-f]*: f6 04 24 22 plsl\.b r2,r11,0x4
++ *[0-9a-f]*: ea 07 24 28 plsl\.b r8,r5,0x7
++ *[0-9a-f]*: e0 02 24 2f plsl\.b pc,r0,0x2
++
++[0-9a-f]* <plsr_b>:
++ *[0-9a-f]*: fe 00 24 3f plsr\.b pc,pc,0x0
++ *[0-9a-f]*: f8 07 24 3c plsr\.b r12,r12,0x7
++ *[0-9a-f]*: ea 04 24 35 plsr\.b r5,r5,0x4
++ *[0-9a-f]*: e8 03 24 34 plsr\.b r4,r4,0x3
++ *[0-9a-f]*: fc 01 24 3e plsr\.b lr,lr,0x1
++ *[0-9a-f]*: e2 02 24 3c plsr\.b r12,r1,0x2
++ *[0-9a-f]*: fe 07 24 36 plsr\.b r6,pc,0x7
++ *[0-9a-f]*: f6 02 24 3c plsr\.b r12,r11,0x2
++
++[0-9a-f]* <pasr_h>:
++ *[0-9a-f]*: fe 00 24 4f pasr\.h pc,pc,0x0
++ *[0-9a-f]*: f8 0f 24 4c pasr\.h r12,r12,0xf
++ *[0-9a-f]*: ea 08 24 45 pasr\.h r5,r5,0x8
++ *[0-9a-f]*: e8 07 24 44 pasr\.h r4,r4,0x7
++ *[0-9a-f]*: fc 01 24 4e pasr\.h lr,lr,0x1
++ *[0-9a-f]*: f6 0a 24 40 pasr\.h r0,r11,0xa
++ *[0-9a-f]*: ec 08 24 44 pasr\.h r4,r6,0x8
++ *[0-9a-f]*: e4 04 24 46 pasr\.h r6,r2,0x4
++
++[0-9a-f]* <plsl_h>:
++ *[0-9a-f]*: fe 00 24 5f plsl\.h pc,pc,0x0
++ *[0-9a-f]*: f8 0f 24 5c plsl\.h r12,r12,0xf
++ *[0-9a-f]*: ea 08 24 55 plsl\.h r5,r5,0x8
++ *[0-9a-f]*: e8 07 24 54 plsl\.h r4,r4,0x7
++ *[0-9a-f]*: fc 01 24 5e plsl\.h lr,lr,0x1
++ *[0-9a-f]*: f4 09 24 55 plsl\.h r5,r10,0x9
++ *[0-9a-f]*: fc 08 24 5d plsl\.h sp,lr,0x8
++ *[0-9a-f]*: fc 07 24 50 plsl\.h r0,lr,0x7
++
++[0-9a-f]* <plsr_h>:
++ *[0-9a-f]*: fe 00 24 6f plsr\.h pc,pc,0x0
++ *[0-9a-f]*: f8 0f 24 6c plsr\.h r12,r12,0xf
++ *[0-9a-f]*: ea 08 24 65 plsr\.h r5,r5,0x8
++ *[0-9a-f]*: e8 07 24 64 plsr\.h r4,r4,0x7
++ *[0-9a-f]*: fc 01 24 6e plsr\.h lr,lr,0x1
++ *[0-9a-f]*: e0 0f 24 6b plsr\.h r11,r0,0xf
++ *[0-9a-f]*: e6 03 24 6e plsr\.h lr,r3,0x3
++ *[0-9a-f]*: fc 0a 24 68 plsr\.h r8,lr,0xa
++
++[0-9a-f]* <packw_sh>:
++ *[0-9a-f]*: fe 0f 24 7f packw\.sh pc,pc,pc
++ *[0-9a-f]*: f8 0c 24 7c packw\.sh r12,r12,r12
++ *[0-9a-f]*: ea 05 24 75 packw\.sh r5,r5,r5
++ *[0-9a-f]*: e8 04 24 74 packw\.sh r4,r4,r4
++ *[0-9a-f]*: fc 0e 24 7e packw\.sh lr,lr,lr
++ *[0-9a-f]*: f6 0a 24 7d packw\.sh sp,r11,r10
++ *[0-9a-f]*: e4 0c 24 78 packw\.sh r8,r2,r12
++ *[0-9a-f]*: e2 05 24 78 packw\.sh r8,r1,r5
++
++[0-9a-f]* <punpckub_h>:
++ *[0-9a-f]*: fe 00 24 8f punpckub\.h pc,pc:b
++ *[0-9a-f]*: f8 00 24 9c punpckub\.h r12,r12:t
++ *[0-9a-f]*: ea 00 24 95 punpckub\.h r5,r5:t
++ *[0-9a-f]*: e8 00 24 84 punpckub\.h r4,r4:b
++ *[0-9a-f]*: fc 00 24 9e punpckub\.h lr,lr:t
++ *[0-9a-f]*: e2 00 24 96 punpckub\.h r6,r1:t
++ *[0-9a-f]*: ea 00 24 8e punpckub\.h lr,r5:b
++ *[0-9a-f]*: e4 00 24 9e punpckub\.h lr,r2:t
++
++[0-9a-f]* <punpcksb_h>:
++ *[0-9a-f]*: fe 00 24 af punpcksb\.h pc,pc:b
++ *[0-9a-f]*: f8 00 24 bc punpcksb\.h r12,r12:t
++ *[0-9a-f]*: ea 00 24 b5 punpcksb\.h r5,r5:t
++ *[0-9a-f]*: e8 00 24 a4 punpcksb\.h r4,r4:b
++ *[0-9a-f]*: fc 00 24 be punpcksb\.h lr,lr:t
++ *[0-9a-f]*: ee 00 24 b4 punpcksb\.h r4,r7:t
++ *[0-9a-f]*: fc 00 24 a6 punpcksb\.h r6,lr:b
++ *[0-9a-f]*: f8 00 24 bc punpcksb\.h r12,r12:t
++
++[0-9a-f]* <packsh_ub>:
++ *[0-9a-f]*: fe 0f 24 cf packsh\.ub pc,pc,pc
++ *[0-9a-f]*: f8 0c 24 cc packsh\.ub r12,r12,r12
++ *[0-9a-f]*: ea 05 24 c5 packsh\.ub r5,r5,r5
++ *[0-9a-f]*: e8 04 24 c4 packsh\.ub r4,r4,r4
++ *[0-9a-f]*: fc 0e 24 ce packsh\.ub lr,lr,lr
++ *[0-9a-f]*: ec 03 24 c3 packsh\.ub r3,r6,r3
++ *[0-9a-f]*: e0 03 24 c8 packsh\.ub r8,r0,r3
++ *[0-9a-f]*: e6 0e 24 c9 packsh\.ub r9,r3,lr
++
++[0-9a-f]* <packsh_sb>:
++ *[0-9a-f]*: fe 0f 24 df packsh\.sb pc,pc,pc
++ *[0-9a-f]*: f8 0c 24 dc packsh\.sb r12,r12,r12
++ *[0-9a-f]*: ea 05 24 d5 packsh\.sb r5,r5,r5
++ *[0-9a-f]*: e8 04 24 d4 packsh\.sb r4,r4,r4
++ *[0-9a-f]*: fc 0e 24 de packsh\.sb lr,lr,lr
++ *[0-9a-f]*: f0 01 24 d6 packsh\.sb r6,r8,r1
++ *[0-9a-f]*: f2 08 24 de packsh\.sb lr,r9,r8
++ *[0-9a-f]*: ec 06 24 dd packsh\.sb sp,r6,r6
++
++[0-9a-f]* <andl>:
++ *[0-9a-f]*: e0 1f 00 00 andl pc,0x0
++ *[0-9a-f]*: e0 1c ff ff andl r12,0xffff
++ *[0-9a-f]*: e0 15 80 00 andl r5,0x8000
++ *[0-9a-f]*: e0 14 7f ff andl r4,0x7fff
++ *[0-9a-f]*: e0 1e 00 01 andl lr,0x1
++ *[0-9a-f]*: e0 1f 5a 58 andl pc,0x5a58
++ *[0-9a-f]*: e0 18 b8 9e andl r8,0xb89e
++ *[0-9a-f]*: e0 17 35 97 andl r7,0x3597
++
++[0-9a-f]* <andl_coh>:
++ *[0-9a-f]*: e2 1f 00 00 andl pc,0x0,COH
++ *[0-9a-f]*: e2 1c ff ff andl r12,0xffff,COH
++ *[0-9a-f]*: e2 15 80 00 andl r5,0x8000,COH
++ *[0-9a-f]*: e2 14 7f ff andl r4,0x7fff,COH
++ *[0-9a-f]*: e2 1e 00 01 andl lr,0x1,COH
++ *[0-9a-f]*: e2 16 58 e1 andl r6,0x58e1,COH
++ *[0-9a-f]*: e2 10 9e cd andl r0,0x9ecd,COH
++ *[0-9a-f]*: e2 14 bd c4 andl r4,0xbdc4,COH
++
++[0-9a-f]* <andh>:
++ *[0-9a-f]*: e4 1f 00 00 andh pc,0x0
++ *[0-9a-f]*: e4 1c ff ff andh r12,0xffff
++ *[0-9a-f]*: e4 15 80 00 andh r5,0x8000
++ *[0-9a-f]*: e4 14 7f ff andh r4,0x7fff
++ *[0-9a-f]*: e4 1e 00 01 andh lr,0x1
++ *[0-9a-f]*: e4 1c cc 58 andh r12,0xcc58
++ *[0-9a-f]*: e4 13 21 e3 andh r3,0x21e3
++ *[0-9a-f]*: e4 12 a7 eb andh r2,0xa7eb
++
++[0-9a-f]* <andh_coh>:
++ *[0-9a-f]*: e6 1f 00 00 andh pc,0x0,COH
++ *[0-9a-f]*: e6 1c ff ff andh r12,0xffff,COH
++ *[0-9a-f]*: e6 15 80 00 andh r5,0x8000,COH
++ *[0-9a-f]*: e6 14 7f ff andh r4,0x7fff,COH
++ *[0-9a-f]*: e6 1e 00 01 andh lr,0x1,COH
++ *[0-9a-f]*: e6 1b 86 0d andh r11,0x860d,COH
++ *[0-9a-f]*: e6 18 ce f6 andh r8,0xcef6,COH
++ *[0-9a-f]*: e6 1a 5c 83 andh r10,0x5c83,COH
++
++[0-9a-f]* <orl>:
++ *[0-9a-f]*: e8 1f 00 00 orl pc,0x0
++ *[0-9a-f]*: e8 1c ff ff orl r12,0xffff
++ *[0-9a-f]*: e8 15 80 00 orl r5,0x8000
++ *[0-9a-f]*: e8 14 7f ff orl r4,0x7fff
++ *[0-9a-f]*: e8 1e 00 01 orl lr,0x1
++ *[0-9a-f]*: e8 1d 41 7e orl sp,0x417e
++ *[0-9a-f]*: e8 10 52 bd orl r0,0x52bd
++ *[0-9a-f]*: e8 1f ac 47 orl pc,0xac47
++
++[0-9a-f]* <orh>:
++ *[0-9a-f]*: ea 1f 00 00 orh pc,0x0
++ *[0-9a-f]*: ea 1c ff ff orh r12,0xffff
++ *[0-9a-f]*: ea 15 80 00 orh r5,0x8000
++ *[0-9a-f]*: ea 14 7f ff orh r4,0x7fff
++ *[0-9a-f]*: ea 1e 00 01 orh lr,0x1
++ *[0-9a-f]*: ea 18 6e 7d orh r8,0x6e7d
++ *[0-9a-f]*: ea 1c 77 1c orh r12,0x771c
++ *[0-9a-f]*: ea 11 ea 1a orh r1,0xea1a
++
++[0-9a-f]* <eorl>:
++ *[0-9a-f]*: ec 1f 00 00 eorl pc,0x0
++ *[0-9a-f]*: ec 1c ff ff eorl r12,0xffff
++ *[0-9a-f]*: ec 15 80 00 eorl r5,0x8000
++ *[0-9a-f]*: ec 14 7f ff eorl r4,0x7fff
++ *[0-9a-f]*: ec 1e 00 01 eorl lr,0x1
++ *[0-9a-f]*: ec 14 c7 b9 eorl r4,0xc7b9
++ *[0-9a-f]*: ec 16 fb dd eorl r6,0xfbdd
++ *[0-9a-f]*: ec 11 51 b1 eorl r1,0x51b1
++
++[0-9a-f]* <eorh>:
++ *[0-9a-f]*: ee 1f 00 00 eorh pc,0x0
++ *[0-9a-f]*: ee 1c ff ff eorh r12,0xffff
++ *[0-9a-f]*: ee 15 80 00 eorh r5,0x8000
++ *[0-9a-f]*: ee 14 7f ff eorh r4,0x7fff
++ *[0-9a-f]*: ee 1e 00 01 eorh lr,0x1
++ *[0-9a-f]*: ee 10 2d d4 eorh r0,0x2dd4
++ *[0-9a-f]*: ee 1a 94 b5 eorh r10,0x94b5
++ *[0-9a-f]*: ee 19 df 2a eorh r9,0xdf2a
++
++[0-9a-f]* <mcall>:
++ *[0-9a-f]*: f0 1f 00 00 mcall [0-9a-f]* <.*>
++ *[0-9a-f]*: f0 1c ff ff mcall r12\[-4\]
++ *[0-9a-f]*: f0 15 80 00 mcall r5\[-131072\]
++ *[0-9a-f]*: f0 14 7f ff mcall r4\[131068\]
++ *[0-9a-f]*: f0 1e 00 01 mcall lr\[4\]
++ *[0-9a-f]*: f0 1d 3b bf mcall sp\[61180\]
++ *[0-9a-f]*: f0 14 dd d2 mcall r4\[-35000\]
++ *[0-9a-f]*: f0 10 09 b1 mcall r0\[9924\]
++
++[0-9a-f]* <pref>:
++ *[0-9a-f]*: f2 1f 00 00 pref pc\[0\]
++ *[0-9a-f]*: f2 1c ff ff pref r12\[-1\]
++ *[0-9a-f]*: f2 15 80 00 pref r5\[-32768\]
++ *[0-9a-f]*: f2 14 7f ff pref r4\[32767\]
++ *[0-9a-f]*: f2 1e 00 01 pref lr\[1\]
++ *[0-9a-f]*: f2 17 1e 44 pref r7\[7748\]
++ *[0-9a-f]*: f2 17 e1 ed pref r7\[-7699\]
++ *[0-9a-f]*: f2 12 9a dc pref r2\[-25892\]
++
++[0-9a-f]* <cache>:
++ *[0-9a-f]*: f4 1f 00 00 cache pc\[0\],0x0
++ *[0-9a-f]*: f4 1c ff ff cache r12\[-1\],0x1f
++ *[0-9a-f]*: f4 15 84 00 cache r5\[-1024\],0x10
++ *[0-9a-f]*: f4 14 7b ff cache r4\[1023\],0xf
++ *[0-9a-f]*: f4 1e 08 01 cache lr\[1\],0x1
++ *[0-9a-f]*: f4 13 8c 3c cache r3\[-964\],0x11
++ *[0-9a-f]*: f4 14 b6 89 cache r4\[-375\],0x16
++ *[0-9a-f]*: f4 13 8c 88 cache r3\[-888\],0x11
++
++[0-9a-f]* <sub4>:
++ *[0-9a-f]*: 20 0f sub pc,0
++ *[0-9a-f]*: 2f fc sub r12,-1
++ *[0-9a-f]*: f0 25 00 00 sub r5,-1048576
++ *[0-9a-f]*: ee 34 ff ff sub r4,1048575
++ *[0-9a-f]*: 20 1e sub lr,1
++ *[0-9a-f]*: f6 22 8d 6c sub r2,-619156
++ *[0-9a-f]*: e6 3e 0a cd sub lr,461517
++ *[0-9a-f]*: fc 38 2d 25 sub r8,-185051
++
++[0-9a-f]* <cp3>:
++ *[0-9a-f]*: 58 0f cp.w pc,0
++ *[0-9a-f]*: 5b fc cp.w r12,-1
++ *[0-9a-f]*: f0 45 00 00 cp.w r5,-1048576
++ *[0-9a-f]*: ee 54 ff ff cp.w r4,1048575
++ *[0-9a-f]*: 58 1e cp.w lr,1
++ *[0-9a-f]*: e0 51 e4 ae cp.w r1,124078
++ *[0-9a-f]*: fa 40 37 e3 cp.w r0,-378909
++ *[0-9a-f]*: fc 44 4a 14 cp.w r4,-243180
++
++[0-9a-f]* <mov2>:
++ *[0-9a-f]*: 30 0f mov pc,0
++ *[0-9a-f]*: 3f fc mov r12,-1
++ *[0-9a-f]*: f0 65 00 00 mov r5,-1048576
++ *[0-9a-f]*: ee 74 ff ff mov r4,1048575
++ *[0-9a-f]*: 30 1e mov lr,1
++ *[0-9a-f]*: fa 75 29 a3 mov r5,-317021
++ *[0-9a-f]*: f4 6d 91 94 mov sp,-749164
++ *[0-9a-f]*: ee 65 58 93 mov r5,940179
++
++[0-9a-f]* <brc2>:
++ *[0-9a-f]*: c0 00 breq [0-9a-f]* <.*>
++ *[0-9a-f]*: fe 9f ff ff bral [0-9a-f]* <.*>
++ *[0-9a-f]*: f0 88 00 00 brls [0-9a-f]* <.*>
++ *[0-9a-f]*: ee 97 ff ff brpl [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 11 brne [0-9a-f]* <.*>
++ *[0-9a-f]*: f2 8b 4a 4d brhi [0-9a-f]* <.*>
++ *[0-9a-f]*: ea 8e 14 cc brqs [0-9a-f]* <.*>
++ *[0-9a-f]*: fa 98 98 33 brls [0-9a-f]* <.*>
++
++[0-9a-f]* <rcall2>:
++ *[0-9a-f]*: c0 0c rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: cf ff rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: f0 a0 00 00 rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: ee b0 ff ff rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: c0 1c rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: e2 b0 ca 5a rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: e8 a0 47 52 rcall [0-9a-f]* <.*>
++ *[0-9a-f]*: fe b0 fd ef rcall [0-9a-f]* <.*>
++
++[0-9a-f]* <sub5>:
++ *[0-9a-f]*: fe cf 00 00 sub pc,pc,0
++ *[0-9a-f]*: f8 cc ff ff sub r12,r12,-1
++ *[0-9a-f]*: ea c5 80 00 sub r5,r5,-32768
++ *[0-9a-f]*: e8 c4 7f ff sub r4,r4,32767
++ *[0-9a-f]*: fc ce 00 01 sub lr,lr,1
++ *[0-9a-f]*: fe cf ce 38 sub pc,pc,-12744
++ *[0-9a-f]*: ee c7 95 1b sub r7,r7,-27365
++ *[0-9a-f]*: f2 c2 bc 32 sub r2,r9,-17358
++
++[0-9a-f]* <satsub_w2>:
++ *[0-9a-f]*: fe df 00 00 satsub\.w pc,pc,0
++ *[0-9a-f]*: f8 dc ff ff satsub\.w r12,r12,-1
++ *[0-9a-f]*: ea d5 80 00 satsub\.w r5,r5,-32768
++ *[0-9a-f]*: e8 d4 7f ff satsub\.w r4,r4,32767
++ *[0-9a-f]*: fc de 00 01 satsub\.w lr,lr,1
++ *[0-9a-f]*: fc d2 f8 29 satsub\.w r2,lr,-2007
++ *[0-9a-f]*: f8 d7 fc f0 satsub\.w r7,r12,-784
++ *[0-9a-f]*: ee d4 5a 8c satsub\.w r4,r7,23180
++
++[0-9a-f]* <ld_d4>:
++ *[0-9a-f]*: fe e0 00 00 ld\.d r0,pc\[0\]
++ *[0-9a-f]*: f8 ee ff ff ld\.d lr,r12\[-1\]
++ *[0-9a-f]*: ea e8 80 00 ld\.d r8,r5\[-32768\]
++ *[0-9a-f]*: e8 e6 7f ff ld\.d r6,r4\[32767\]
++ *[0-9a-f]*: fc e2 00 01 ld\.d r2,lr\[1\]
++ *[0-9a-f]*: f6 ee 39 c0 ld\.d lr,r11\[14784\]
++ *[0-9a-f]*: f2 e6 b6 27 ld\.d r6,r9\[-18905\]
++ *[0-9a-f]*: e6 e2 e7 2d ld\.d r2,r3\[-6355\]
++
++[0-9a-f]* <ld_w4>:
++ *[0-9a-f]*: 7e 0f ld\.w pc,pc\[0x0\]
++ *[0-9a-f]*: f8 fc ff ff ld\.w r12,r12\[-1\]
++ *[0-9a-f]*: ea f5 80 00 ld\.w r5,r5\[-32768\]
++ *[0-9a-f]*: e8 f4 7f ff ld\.w r4,r4\[32767\]
++ *[0-9a-f]*: fc fe 00 01 ld\.w lr,lr\[1\]
++ *[0-9a-f]*: f8 f0 a9 8b ld\.w r0,r12\[-22133\]
++ *[0-9a-f]*: fe fd af d7 ld\.w sp,pc\[-20521\]
++ *[0-9a-f]*: d7 03 nop
++
++[0-9a-f]* <ld_sh4>:
++ *[0-9a-f]*: 9e 0f ld\.sh pc,pc\[0x0\]
++ *[0-9a-f]*: f9 0c ff ff ld\.sh r12,r12\[-1\]
++ *[0-9a-f]*: eb 05 80 00 ld\.sh r5,r5\[-32768\]
++ *[0-9a-f]*: e9 04 7f ff ld\.sh r4,r4\[32767\]
++ *[0-9a-f]*: fd 0e 00 01 ld\.sh lr,lr\[1\]
++ *[0-9a-f]*: f5 06 78 d2 ld\.sh r6,r10\[30930\]
++ *[0-9a-f]*: f5 06 55 d5 ld\.sh r6,r10\[21973\]
++ *[0-9a-f]*: d7 03 nop
++
++[0-9a-f]* <ld_uh4>:
++ *[0-9a-f]*: 9e 8f ld\.uh pc,pc\[0x0\]
++ *[0-9a-f]*: f9 1c ff ff ld\.uh r12,r12\[-1\]
++ *[0-9a-f]*: eb 15 80 00 ld\.uh r5,r5\[-32768\]
++ *[0-9a-f]*: e9 14 7f ff ld\.uh r4,r4\[32767\]
++ *[0-9a-f]*: fd 1e 00 01 ld\.uh lr,lr\[1\]
++ *[0-9a-f]*: f3 11 cb d6 ld\.uh r1,r9\[-13354\]
++ *[0-9a-f]*: f7 1e 53 59 ld\.uh lr,r11\[21337\]
++ *[0-9a-f]*: d7 03 nop
++
++[0-9a-f]* <ld_sb1>:
++ *[0-9a-f]*: ff 2f 00 00 ld\.sb pc,pc\[0\]
++ *[0-9a-f]*: f9 2c ff ff ld\.sb r12,r12\[-1\]
++ *[0-9a-f]*: eb 25 80 00 ld\.sb r5,r5\[-32768\]
++ *[0-9a-f]*: e9 24 7f ff ld\.sb r4,r4\[32767\]
++ *[0-9a-f]*: fd 2e 00 01 ld\.sb lr,lr\[1\]
++ *[0-9a-f]*: fb 27 90 09 ld\.sb r7,sp\[-28663\]
++ *[0-9a-f]*: e3 22 e9 09 ld\.sb r2,r1\[-5879\]
++ *[0-9a-f]*: e7 2c 49 2e ld\.sb r12,r3\[18734\]
++
++[0-9a-f]* <ld_ub4>:
++ *[0-9a-f]*: 1f 8f ld\.ub pc,pc\[0x0\]
++ *[0-9a-f]*: f9 3c ff ff ld\.ub r12,r12\[-1\]
++ *[0-9a-f]*: eb 35 80 00 ld\.ub r5,r5\[-32768\]
++ *[0-9a-f]*: e9 34 7f ff ld\.ub r4,r4\[32767\]
++ *[0-9a-f]*: 1d 9e ld\.ub lr,lr\[0x1\]
++ *[0-9a-f]*: e9 3f 20 55 ld\.ub pc,r4\[8277\]
++ *[0-9a-f]*: f9 35 4a e4 ld\.ub r5,r12\[19172\]
++ *[0-9a-f]*: fd 3a 66 eb ld\.ub r10,lr\[26347\]
++
++[0-9a-f]* <st_d4>:
++ *[0-9a-f]*: fe e1 00 00 st\.d pc\[0\],r0
++ *[0-9a-f]*: f8 ef ff ff st\.d r12\[-1\],lr
++ *[0-9a-f]*: ea e9 80 00 st\.d r5\[-32768\],r8
++ *[0-9a-f]*: e8 e7 7f ff st\.d r4\[32767\],r6
++ *[0-9a-f]*: fc e3 00 01 st\.d lr\[1\],r2
++ *[0-9a-f]*: ea eb 33 90 st\.d r5\[13200\],r10
++ *[0-9a-f]*: ea eb 24 88 st\.d r5\[9352\],r10
++ *[0-9a-f]*: ea e5 7e 75 st\.d r5\[32373\],r4
++
++[0-9a-f]* <st_w4>:
++ *[0-9a-f]*: 9f 0f st\.w pc\[0x0\],pc
++ *[0-9a-f]*: f9 4c ff ff st\.w r12\[-1\],r12
++ *[0-9a-f]*: eb 45 80 00 st\.w r5\[-32768\],r5
++ *[0-9a-f]*: e9 44 7f ff st\.w r4\[32767\],r4
++ *[0-9a-f]*: fd 4e 00 01 st\.w lr\[1\],lr
++ *[0-9a-f]*: fb 47 17 f8 st\.w sp\[6136\],r7
++ *[0-9a-f]*: ed 4c 69 cf st\.w r6\[27087\],r12
++ *[0-9a-f]*: d7 03 nop
++
++[0-9a-f]* <st_h4>:
++ *[0-9a-f]*: be 0f st\.h pc\[0x0\],pc
++ *[0-9a-f]*: f9 5c ff ff st\.h r12\[-1\],r12
++ *[0-9a-f]*: eb 55 80 00 st\.h r5\[-32768\],r5
++ *[0-9a-f]*: e9 54 7f ff st\.h r4\[32767\],r4
++ *[0-9a-f]*: fd 5e 00 01 st\.h lr\[1\],lr
++ *[0-9a-f]*: e9 57 d9 16 st\.h r4\[-9962\],r7
++ *[0-9a-f]*: f3 53 c0 86 st\.h r9\[-16250\],r3
++ *[0-9a-f]*: d7 03 nop
++
++[0-9a-f]* <st_b4>:
++ *[0-9a-f]*: be 8f st\.b pc\[0x0\],pc
++ *[0-9a-f]*: f9 6c ff ff st\.b r12\[-1\],r12
++ *[0-9a-f]*: eb 65 80 00 st\.b r5\[-32768\],r5
++ *[0-9a-f]*: e9 64 7f ff st\.b r4\[32767\],r4
++ *[0-9a-f]*: bc 9e st\.b lr\[0x1\],lr
++ *[0-9a-f]*: f9 66 75 96 st\.b r12\[30102\],r6
++ *[0-9a-f]*: eb 61 71 31 st\.b r5\[28977\],r1
++ *[0-9a-f]*: e1 61 15 5e st\.b r0\[5470\],r1
++
++[0-9a-f]* <mfsr>:
++ *[0-9a-f]*: e1 bf 00 00 mfsr pc,0x0
++ *[0-9a-f]*: e1 bc 00 ff mfsr r12,0x3fc
++ *[0-9a-f]*: e1 b5 00 80 mfsr r5,0x200
++ *[0-9a-f]*: e1 b4 00 7f mfsr r4,0x1fc
++ *[0-9a-f]*: e1 be 00 01 mfsr lr,0x4
++ *[0-9a-f]*: e1 b2 00 ae mfsr r2,0x2b8
++ *[0-9a-f]*: e1 b4 00 41 mfsr r4,0x104
++ *[0-9a-f]*: e1 ba 00 fe mfsr r10,0x3f8
++
++[0-9a-f]* <mtsr>:
++ *[0-9a-f]*: e3 bf 00 00 mtsr 0x0,pc
++ *[0-9a-f]*: e3 bc 00 ff mtsr 0x3fc,r12
++ *[0-9a-f]*: e3 b5 00 80 mtsr 0x200,r5
++ *[0-9a-f]*: e3 b4 00 7f mtsr 0x1fc,r4
++ *[0-9a-f]*: e3 be 00 01 mtsr 0x4,lr
++ *[0-9a-f]*: e3 ba 00 38 mtsr 0xe0,r10
++ *[0-9a-f]*: e3 bc 00 d1 mtsr 0x344,r12
++ *[0-9a-f]*: e3 b9 00 4c mtsr 0x130,r9
++
++[0-9a-f]* <mfdr>:
++ *[0-9a-f]*: e5 bf 00 00 mfdr pc,0x0
++ *[0-9a-f]*: e5 bc 00 ff mfdr r12,0x3fc
++ *[0-9a-f]*: e5 b5 00 80 mfdr r5,0x200
++ *[0-9a-f]*: e5 b4 00 7f mfdr r4,0x1fc
++ *[0-9a-f]*: e5 be 00 01 mfdr lr,0x4
++ *[0-9a-f]*: e5 b6 00 e9 mfdr r6,0x3a4
++ *[0-9a-f]*: e5 b5 00 09 mfdr r5,0x24
++ *[0-9a-f]*: e5 b9 00 4b mfdr r9,0x12c
++
++[0-9a-f]* <mtdr>:
++ *[0-9a-f]*: e7 bf 00 00 mtdr 0x0,pc
++ *[0-9a-f]*: e7 bc 00 ff mtdr 0x3fc,r12
++ *[0-9a-f]*: e7 b5 00 80 mtdr 0x200,r5
++ *[0-9a-f]*: e7 b4 00 7f mtdr 0x1fc,r4
++ *[0-9a-f]*: e7 be 00 01 mtdr 0x4,lr
++ *[0-9a-f]*: e7 b8 00 2d mtdr 0xb4,r8
++ *[0-9a-f]*: e7 ba 00 b4 mtdr 0x2d0,r10
++ *[0-9a-f]*: e7 be 00 66 mtdr 0x198,lr
++
++[0-9a-f]* <sleep>:
++ *[0-9a-f]*: e9 b0 00 00 sleep 0x0
++ *[0-9a-f]*: e9 b0 00 ff sleep 0xff
++ *[0-9a-f]*: e9 b0 00 80 sleep 0x80
++ *[0-9a-f]*: e9 b0 00 7f sleep 0x7f
++ *[0-9a-f]*: e9 b0 00 01 sleep 0x1
++ *[0-9a-f]*: e9 b0 00 fe sleep 0xfe
++ *[0-9a-f]*: e9 b0 00 0f sleep 0xf
++ *[0-9a-f]*: e9 b0 00 2b sleep 0x2b
++
++[0-9a-f]* <sync>:
++ *[0-9a-f]*: eb b0 00 00 sync 0x0
++ *[0-9a-f]*: eb b0 00 ff sync 0xff
++ *[0-9a-f]*: eb b0 00 80 sync 0x80
++ *[0-9a-f]*: eb b0 00 7f sync 0x7f
++ *[0-9a-f]*: eb b0 00 01 sync 0x1
++ *[0-9a-f]*: eb b0 00 a6 sync 0xa6
++ *[0-9a-f]*: eb b0 00 e6 sync 0xe6
++ *[0-9a-f]*: eb b0 00 b4 sync 0xb4
++
++[0-9a-f]* <bld>:
++ *[0-9a-f]*: ed bf 00 00 bld pc,0x0
++ *[0-9a-f]*: ed bc 00 1f bld r12,0x1f
++ *[0-9a-f]*: ed b5 00 10 bld r5,0x10
++ *[0-9a-f]*: ed b4 00 0f bld r4,0xf
++ *[0-9a-f]*: ed be 00 01 bld lr,0x1
++ *[0-9a-f]*: ed b9 00 0f bld r9,0xf
++ *[0-9a-f]*: ed b0 00 04 bld r0,0x4
++ *[0-9a-f]*: ed be 00 1a bld lr,0x1a
++
++[0-9a-f]* <bst>:
++ *[0-9a-f]*: ef bf 00 00 bst pc,0x0
++ *[0-9a-f]*: ef bc 00 1f bst r12,0x1f
++ *[0-9a-f]*: ef b5 00 10 bst r5,0x10
++ *[0-9a-f]*: ef b4 00 0f bst r4,0xf
++ *[0-9a-f]*: ef be 00 01 bst lr,0x1
++ *[0-9a-f]*: ef ba 00 1c bst r10,0x1c
++ *[0-9a-f]*: ef b0 00 03 bst r0,0x3
++ *[0-9a-f]*: ef bd 00 02 bst sp,0x2
++
++[0-9a-f]* <sats>:
++ *[0-9a-f]*: f1 bf 00 00 sats pc,0x0
++ *[0-9a-f]*: f1 bc 03 ff sats r12>>0x1f,0x1f
++ *[0-9a-f]*: f1 b5 02 10 sats r5>>0x10,0x10
++ *[0-9a-f]*: f1 b4 01 ef sats r4>>0xf,0xf
++ *[0-9a-f]*: f1 be 00 21 sats lr>>0x1,0x1
++ *[0-9a-f]*: f1 ba 02 63 sats r10>>0x3,0x13
++ *[0-9a-f]*: f1 ba 03 42 sats r10>>0x2,0x1a
++ *[0-9a-f]*: f1 b1 00 34 sats r1>>0x14,0x1
++
++[0-9a-f]* <satu>:
++ *[0-9a-f]*: f1 bf 04 00 satu pc,0x0
++ *[0-9a-f]*: f1 bc 07 ff satu r12>>0x1f,0x1f
++ *[0-9a-f]*: f1 b5 06 10 satu r5>>0x10,0x10
++ *[0-9a-f]*: f1 b4 05 ef satu r4>>0xf,0xf
++ *[0-9a-f]*: f1 be 04 21 satu lr>>0x1,0x1
++ *[0-9a-f]*: f1 bf 04 e5 satu pc>>0x5,0x7
++ *[0-9a-f]*: f1 b7 04 a5 satu r7>>0x5,0x5
++ *[0-9a-f]*: f1 b2 06 7a satu r2>>0x1a,0x13
++
++[0-9a-f]* <satrnds>:
++ *[0-9a-f]*: f3 bf 00 00 satrnds pc,0x0
++ *[0-9a-f]*: f3 bc 03 ff satrnds r12>>0x1f,0x1f
++ *[0-9a-f]*: f3 b5 02 10 satrnds r5>>0x10,0x10
++ *[0-9a-f]*: f3 b4 01 ef satrnds r4>>0xf,0xf
++ *[0-9a-f]*: f3 be 00 21 satrnds lr>>0x1,0x1
++ *[0-9a-f]*: f3 b0 02 75 satrnds r0>>0x15,0x13
++ *[0-9a-f]*: f3 bd 00 40 satrnds sp,0x2
++ *[0-9a-f]*: f3 b7 03 a6 satrnds r7>>0x6,0x1d
++
++[0-9a-f]* <satrndu>:
++ *[0-9a-f]*: f3 bf 04 00 satrndu pc,0x0
++ *[0-9a-f]*: f3 bc 07 ff satrndu r12>>0x1f,0x1f
++ *[0-9a-f]*: f3 b5 06 10 satrndu r5>>0x10,0x10
++ *[0-9a-f]*: f3 b4 05 ef satrndu r4>>0xf,0xf
++ *[0-9a-f]*: f3 be 04 21 satrndu lr>>0x1,0x1
++ *[0-9a-f]*: f3 bc 07 40 satrndu r12,0x1a
++ *[0-9a-f]*: f3 b4 04 75 satrndu r4>>0x15,0x3
++ *[0-9a-f]*: f3 ba 06 03 satrndu r10>>0x3,0x10
++
++[0-9a-f]* <subfc>:
++ *[0-9a-f]*: f5 bf 00 00 subfeq pc,0
++ *[0-9a-f]*: f5 bc 0f ff subfal r12,-1
++ *[0-9a-f]*: f5 b5 08 80 subfls r5,-128
++ *[0-9a-f]*: f5 b4 07 7f subfpl r4,127
++ *[0-9a-f]*: f5 be 01 01 subfne lr,1
++ *[0-9a-f]*: f5 ba 08 08 subfls r10,8
++ *[0-9a-f]*: f5 bb 0d 63 subfvc r11,99
++ *[0-9a-f]*: f5 b2 0c 49 subfvs r2,73
++
++[0-9a-f]* <subc>:
++ *[0-9a-f]*: f7 bf 00 00 subeq pc,0
++ *[0-9a-f]*: f7 bc 0f ff subal r12,-1
++ *[0-9a-f]*: f7 b5 08 80 subls r5,-128
++ *[0-9a-f]*: f7 b4 07 7f subpl r4,127
++ *[0-9a-f]*: f7 be 01 01 subne lr,1
++ *[0-9a-f]*: f7 bc 08 76 subls r12,118
++ *[0-9a-f]*: f7 be 0d f4 subvc lr,-12
++ *[0-9a-f]*: f7 b4 06 f3 submi r4,-13
++
++[0-9a-f]* <movc2>:
++ *[0-9a-f]*: f9 bf 00 00 moveq pc,0
++ *[0-9a-f]*: f9 bc 0f ff moval r12,-1
++ *[0-9a-f]*: f9 b5 08 80 movls r5,-128
++ *[0-9a-f]*: f9 b4 07 7f movpl r4,127
++ *[0-9a-f]*: f9 be 01 01 movne lr,1
++ *[0-9a-f]*: f9 b3 05 86 movlt r3,-122
++ *[0-9a-f]*: f9 b8 0d 02 movvc r8,2
++ *[0-9a-f]*: f9 b7 01 91 movne r7,-111
++
++[0-9a-f]* <cp_b>:
++ *[0-9a-f]*: e0 0f 18 00 cp\.b pc,r0
++ *[0-9a-f]*: fe 00 18 00 cp\.b r0,pc
++ *[0-9a-f]*: f0 07 18 00 cp\.b r7,r8
++ *[0-9a-f]*: ee 08 18 00 cp\.b r8,r7
++
++[0-9a-f]* <cp_h>:
++ *[0-9a-f]*: e0 0f 19 00 cp\.h pc,r0
++ *[0-9a-f]*: fe 00 19 00 cp\.h r0,pc
++ *[0-9a-f]*: f0 07 19 00 cp\.h r7,r8
++ *[0-9a-f]*: ee 08 19 00 cp\.h r8,r7
++
++[0-9a-f]* <ldm>:
++ *[0-9a-f]*: e1 cf 00 7e ldm pc,r1-r6
++ *[0-9a-f]*: e1 cc ff ff ldm r12,r0-pc
++ *[0-9a-f]*: e1 c5 80 00 ldm r5,pc
++ *[0-9a-f]*: e1 c4 7f ff ldm r4,r0-lr
++ *[0-9a-f]*: e1 ce 00 01 ldm lr,r0
++ *[0-9a-f]*: e1 c9 40 22 ldm r9,r1,r5,lr
++ *[0-9a-f]*: e1 cb 81 ec ldm r11,r2-r3,r5-r8,pc
++ *[0-9a-f]*: e1 c6 a2 09 ldm r6,r0,r3,r9,sp,pc
++
++[0-9a-f]* <ldm_pu>:
++ *[0-9a-f]*: e3 cf 03 c0 ldm pc\+\+,r6-r9
++ *[0-9a-f]*: e3 cc ff ff ldm r12\+\+,r0-pc
++ *[0-9a-f]*: e3 c5 80 00 ldm r5\+\+,pc
++ *[0-9a-f]*: e3 c4 7f ff ldm r4\+\+,r0-lr
++ *[0-9a-f]*: e3 ce 00 01 ldm lr\+\+,r0
++ *[0-9a-f]*: e3 cc d5 38 ldm r12\+\+,r3-r5,r8,r10,r12,lr-pc
++ *[0-9a-f]*: e3 ca c0 74 ldm r10\+\+,r2,r4-r6,lr-pc
++ *[0-9a-f]*: e3 c6 7e 1a ldm r6\+\+,r1,r3-r4,r9-lr
++
++[0-9a-f]* <ldmts>:
++ *[0-9a-f]*: e5 cf 01 80 ldmts pc,r7-r8
++ *[0-9a-f]*: e5 cc ff ff ldmts r12,r0-pc
++ *[0-9a-f]*: e5 c5 80 00 ldmts r5,pc
++ *[0-9a-f]*: e5 c4 7f ff ldmts r4,r0-lr
++ *[0-9a-f]*: e5 ce 00 01 ldmts lr,r0
++ *[0-9a-f]*: e5 c0 18 06 ldmts r0,r1-r2,r11-r12
++ *[0-9a-f]*: e5 ce 61 97 ldmts lr,r0-r2,r4,r7-r8,sp-lr
++ *[0-9a-f]*: e5 cc c2 3b ldmts r12,r0-r1,r3-r5,r9,lr-pc
++
++[0-9a-f]* <ldmts_pu>:
++ *[0-9a-f]*: e7 cf 02 00 ldmts pc\+\+,r9
++ *[0-9a-f]*: e7 cc ff ff ldmts r12\+\+,r0-pc
++ *[0-9a-f]*: e7 c5 80 00 ldmts r5\+\+,pc
++ *[0-9a-f]*: e7 c4 7f ff ldmts r4\+\+,r0-lr
++ *[0-9a-f]*: e7 ce 00 01 ldmts lr\+\+,r0
++ *[0-9a-f]*: e7 cd 0a bd ldmts sp\+\+,r0,r2-r5,r7,r9,r11
++ *[0-9a-f]*: e7 c5 0c 8e ldmts r5\+\+,r1-r3,r7,r10-r11
++ *[0-9a-f]*: e7 c8 a1 9c ldmts r8\+\+,r2-r4,r7-r8,sp,pc
++
++[0-9a-f]* <stm>:
++ *[0-9a-f]*: e9 cf 00 80 stm pc,r7
++ *[0-9a-f]*: e9 cc ff ff stm r12,r0-pc
++ *[0-9a-f]*: e9 c5 80 00 stm r5,pc
++ *[0-9a-f]*: e9 c4 7f ff stm r4,r0-lr
++ *[0-9a-f]*: e9 ce 00 01 stm lr,r0
++ *[0-9a-f]*: e9 cd 49 2c stm sp,r2-r3,r5,r8,r11,lr
++ *[0-9a-f]*: e9 c4 4c 5f stm r4,r0-r4,r6,r10-r11,lr
++ *[0-9a-f]*: e9 c9 f2 22 stm r9,r1,r5,r9,r12-pc
++
++[0-9a-f]* <stm_pu>:
++ *[0-9a-f]*: eb cf 00 70 stm --pc,r4-r6
++ *[0-9a-f]*: eb cc ff ff stm --r12,r0-pc
++ *[0-9a-f]*: eb c5 80 00 stm --r5,pc
++ *[0-9a-f]*: eb c4 7f ff stm --r4,r0-lr
++ *[0-9a-f]*: eb ce 00 01 stm --lr,r0
++ *[0-9a-f]*: eb cb fb f1 stm --r11,r0,r4-r9,r11-pc
++ *[0-9a-f]*: eb cb 56 09 stm --r11,r0,r3,r9-r10,r12,lr
++ *[0-9a-f]*: eb c6 63 04 stm --r6,r2,r8-r9,sp-lr
++
++[0-9a-f]* <stmts>:
++ *[0-9a-f]*: ed cf 01 00 stmts pc,r8
++ *[0-9a-f]*: ed cc ff ff stmts r12,r0-pc
++ *[0-9a-f]*: ed c5 80 00 stmts r5,pc
++ *[0-9a-f]*: ed c4 7f ff stmts r4,r0-lr
++ *[0-9a-f]*: ed ce 00 01 stmts lr,r0
++ *[0-9a-f]*: ed c1 c6 5b stmts r1,r0-r1,r3-r4,r6,r9-r10,lr-pc
++ *[0-9a-f]*: ed c3 1d c1 stmts r3,r0,r6-r8,r10-r12
++ *[0-9a-f]*: ed cb d6 d1 stmts r11,r0,r4,r6-r7,r9-r10,r12,lr-pc
++
++[0-9a-f]* <stmts_pu>:
++ *[0-9a-f]*: ef cf 01 c0 stmts --pc,r6-r8
++ *[0-9a-f]*: ef cc ff ff stmts --r12,r0-pc
++ *[0-9a-f]*: ef c5 80 00 stmts --r5,pc
++ *[0-9a-f]*: ef c4 7f ff stmts --r4,r0-lr
++ *[0-9a-f]*: ef ce 00 01 stmts --lr,r0
++ *[0-9a-f]*: ef c2 36 19 stmts --r2,r0,r3-r4,r9-r10,r12-sp
++ *[0-9a-f]*: ef c3 c0 03 stmts --r3,r0-r1,lr-pc
++ *[0-9a-f]*: ef c0 44 7d stmts --r0,r0,r2-r6,r10,lr
++
++[0-9a-f]* <ldins_h>:
++ *[0-9a-f]*: ff df 00 00 ldins\.h pc:b,pc\[0\]
++ *[0-9a-f]*: f9 dc 1f ff ldins\.h r12:t,r12\[-2\]
++ *[0-9a-f]*: eb d5 18 00 ldins\.h r5:t,r5\[-4096\]
++ *[0-9a-f]*: e9 d4 07 ff ldins\.h r4:b,r4\[4094\]
++ *[0-9a-f]*: fd de 10 01 ldins\.h lr:t,lr\[2\]
++ *[0-9a-f]*: fd d0 13 c5 ldins\.h r0:t,lr\[1930\]
++ *[0-9a-f]*: ef d3 0e f5 ldins\.h r3:b,r7\[-534\]
++ *[0-9a-f]*: f9 d2 0b 9a ldins\.h r2:b,r12\[-2252\]
++
++[0-9a-f]* <ldins_b>:
++ *[0-9a-f]*: ff df 40 00 ldins\.b pc:b,pc\[0\]
++ *[0-9a-f]*: f9 dc 7f ff ldins\.b r12:t,r12\[-1\]
++ *[0-9a-f]*: eb d5 68 00 ldins\.b r5:u,r5\[-2048\]
++ *[0-9a-f]*: e9 d4 57 ff ldins\.b r4:l,r4\[2047\]
++ *[0-9a-f]*: fd de 50 01 ldins\.b lr:l,lr\[1\]
++ *[0-9a-f]*: e9 d6 7d 6a ldins\.b r6:t,r4\[-662\]
++ *[0-9a-f]*: e3 d5 4f 69 ldins\.b r5:b,r1\[-151\]
++ *[0-9a-f]*: f7 da 78 7d ldins\.b r10:t,r11\[-1923\]
++
++[0-9a-f]* <ldswp_sh>:
++ *[0-9a-f]*: ff df 20 00 ldswp\.sh pc,pc\[0\]
++ *[0-9a-f]*: f9 dc 2f ff ldswp\.sh r12,r12\[-2\]
++ *[0-9a-f]*: eb d5 28 00 ldswp\.sh r5,r5\[-4096\]
++ *[0-9a-f]*: e9 d4 27 ff ldswp\.sh r4,r4\[4094\]
++ *[0-9a-f]*: fd de 20 01 ldswp\.sh lr,lr\[2\]
++ *[0-9a-f]*: f5 d9 27 84 ldswp\.sh r9,r10\[3848\]
++ *[0-9a-f]*: f9 d4 2c 04 ldswp\.sh r4,r12\[-2040\]
++ *[0-9a-f]*: e5 da 26 08 ldswp\.sh r10,r2\[3088\]
++
++[0-9a-f]* <ldswp_uh>:
++ *[0-9a-f]*: ff df 30 00 ldswp\.uh pc,pc\[0\]
++ *[0-9a-f]*: f9 dc 3f ff ldswp\.uh r12,r12\[-2\]
++ *[0-9a-f]*: eb d5 38 00 ldswp\.uh r5,r5\[-4096\]
++ *[0-9a-f]*: e9 d4 37 ff ldswp\.uh r4,r4\[4094\]
++ *[0-9a-f]*: fd de 30 01 ldswp\.uh lr,lr\[2\]
++ *[0-9a-f]*: f3 d4 37 46 ldswp\.uh r4,r9\[3724\]
++ *[0-9a-f]*: fb de 3c bc ldswp\.uh lr,sp\[-1672\]
++ *[0-9a-f]*: f9 d8 38 7d ldswp\.uh r8,r12\[-3846\]
++
++[0-9a-f]* <ldswp_w>:
++ *[0-9a-f]*: ff df 80 00 ldswp\.w pc,pc\[0\]
++ *[0-9a-f]*: f9 dc 8f ff ldswp\.w r12,r12\[-4\]
++ *[0-9a-f]*: eb d5 88 00 ldswp\.w r5,r5\[-8192\]
++ *[0-9a-f]*: e9 d4 87 ff ldswp\.w r4,r4\[8188\]
++ *[0-9a-f]*: fd de 80 01 ldswp\.w lr,lr\[4\]
++ *[0-9a-f]*: ef dd 81 d1 ldswp\.w sp,r7\[1860\]
++ *[0-9a-f]*: eb df 8c c1 ldswp\.w pc,r5\[-3324\]
++ *[0-9a-f]*: f5 dc 8c c8 ldswp\.w r12,r10\[-3296\]
++
++[0-9a-f]* <stswp_h>:
++ *[0-9a-f]*: ff df 90 00 stswp\.h pc\[0\],pc
++ *[0-9a-f]*: f9 dc 9f ff stswp\.h r12\[-2\],r12
++ *[0-9a-f]*: eb d5 98 00 stswp\.h r5\[-4096\],r5
++ *[0-9a-f]*: e9 d4 97 ff stswp\.h r4\[4094\],r4
++ *[0-9a-f]*: fd de 90 01 stswp\.h lr\[2\],lr
++ *[0-9a-f]*: ef da 90 20 stswp\.h r7\[64\],r10
++ *[0-9a-f]*: f5 d2 95 e8 stswp\.h r10\[3024\],r2
++ *[0-9a-f]*: e1 da 9b 74 stswp\.h r0\[-2328\],r10
++
++[0-9a-f]* <stswp_w>:
++ *[0-9a-f]*: ff df a0 00 stswp\.w pc\[0\],pc
++ *[0-9a-f]*: f9 dc af ff stswp\.w r12\[-4\],r12
++ *[0-9a-f]*: eb d5 a8 00 stswp\.w r5\[-8192\],r5
++ *[0-9a-f]*: e9 d4 a7 ff stswp\.w r4\[8188\],r4
++ *[0-9a-f]*: fd de a0 01 stswp\.w lr\[4\],lr
++ *[0-9a-f]*: ff d8 a1 21 stswp\.w pc\[1156\],r8
++ *[0-9a-f]*: fb da a7 ce stswp\.w sp\[7992\],r10
++ *[0-9a-f]*: f1 d5 ae db stswp\.w r8\[-1172\],r5
++
++[0-9a-f]* <and2>:
++ *[0-9a-f]*: ff ef 00 0f and pc,pc,pc
++ *[0-9a-f]*: f9 ec 01 fc and r12,r12,r12<<0x1f
++ *[0-9a-f]*: eb e5 01 05 and r5,r5,r5<<0x10
++ *[0-9a-f]*: e9 e4 00 f4 and r4,r4,r4<<0xf
++ *[0-9a-f]*: fd ee 00 1e and lr,lr,lr<<0x1
++ *[0-9a-f]*: e5 e1 00 1a and r10,r2,r1<<0x1
++ *[0-9a-f]*: f1 eb 01 bc and r12,r8,r11<<0x1b
++ *[0-9a-f]*: ef e0 00 3a and r10,r7,r0<<0x3
++
++[0-9a-f]* <and3>:
++ *[0-9a-f]*: ff ef 02 0f and pc,pc,pc
++ *[0-9a-f]*: f9 ec 03 fc and r12,r12,r12>>0x1f
++ *[0-9a-f]*: eb e5 03 05 and r5,r5,r5>>0x10
++ *[0-9a-f]*: e9 e4 02 f4 and r4,r4,r4>>0xf
++ *[0-9a-f]*: fd ee 02 1e and lr,lr,lr>>0x1
++ *[0-9a-f]*: f1 e7 03 1c and r12,r8,r7>>0x11
++ *[0-9a-f]*: e9 e9 03 4f and pc,r4,r9>>0x14
++ *[0-9a-f]*: f3 ea 02 ca and r10,r9,r10>>0xc
++
++[0-9a-f]* <or2>:
++ *[0-9a-f]*: ff ef 10 0f or pc,pc,pc
++ *[0-9a-f]*: f9 ec 11 fc or r12,r12,r12<<0x1f
++ *[0-9a-f]*: eb e5 11 05 or r5,r5,r5<<0x10
++ *[0-9a-f]*: e9 e4 10 f4 or r4,r4,r4<<0xf
++ *[0-9a-f]*: fd ee 10 1e or lr,lr,lr<<0x1
++ *[0-9a-f]*: fb eb 11 d8 or r8,sp,r11<<0x1d
++ *[0-9a-f]*: f3 e2 11 cf or pc,r9,r2<<0x1c
++ *[0-9a-f]*: e3 e2 10 35 or r5,r1,r2<<0x3
++
++[0-9a-f]* <or3>:
++ *[0-9a-f]*: ff ef 12 0f or pc,pc,pc
++ *[0-9a-f]*: f9 ec 13 fc or r12,r12,r12>>0x1f
++ *[0-9a-f]*: eb e5 13 05 or r5,r5,r5>>0x10
++ *[0-9a-f]*: e9 e4 12 f4 or r4,r4,r4>>0xf
++ *[0-9a-f]*: fd ee 12 1e or lr,lr,lr>>0x1
++ *[0-9a-f]*: fb ed 12 21 or r1,sp,sp>>0x2
++ *[0-9a-f]*: e3 e1 13 d0 or r0,r1,r1>>0x1d
++ *[0-9a-f]*: f9 e8 12 84 or r4,r12,r8>>0x8
++
++[0-9a-f]* <eor2>:
++ *[0-9a-f]*: ff ef 20 0f eor pc,pc,pc
++ *[0-9a-f]*: f9 ec 21 fc eor r12,r12,r12<<0x1f
++ *[0-9a-f]*: eb e5 21 05 eor r5,r5,r5<<0x10
++ *[0-9a-f]*: e9 e4 20 f4 eor r4,r4,r4<<0xf
++ *[0-9a-f]*: fd ee 20 1e eor lr,lr,lr<<0x1
++ *[0-9a-f]*: f3 e4 20 ba eor r10,r9,r4<<0xb
++ *[0-9a-f]*: e1 e1 21 f4 eor r4,r0,r1<<0x1f
++ *[0-9a-f]*: e5 ec 20 d6 eor r6,r2,r12<<0xd
++
++[0-9a-f]* <eor3>:
++ *[0-9a-f]*: ff ef 22 0f eor pc,pc,pc
++ *[0-9a-f]*: f9 ec 23 fc eor r12,r12,r12>>0x1f
++ *[0-9a-f]*: eb e5 23 05 eor r5,r5,r5>>0x10
++ *[0-9a-f]*: e9 e4 22 f4 eor r4,r4,r4>>0xf
++ *[0-9a-f]*: fd ee 22 1e eor lr,lr,lr>>0x1
++ *[0-9a-f]*: eb e5 23 65 eor r5,r5,r5>>0x16
++ *[0-9a-f]*: e3 ee 22 3a eor r10,r1,lr>>0x3
++ *[0-9a-f]*: fd ed 23 a7 eor r7,lr,sp>>0x1a
++
++[0-9a-f]* <sthh_w2>:
++ *[0-9a-f]*: ff ef 8f 0f sthh\.w pc\[pc\],pc:b,pc:b
++ *[0-9a-f]*: f9 ec bc 3c sthh\.w r12\[r12<<0x3\],r12:t,r12:t
++ *[0-9a-f]*: eb e5 b5 25 sthh\.w r5\[r5<<0x2\],r5:t,r5:t
++ *[0-9a-f]*: e9 e4 84 14 sthh\.w r4\[r4<<0x1\],r4:b,r4:b
++ *[0-9a-f]*: fd ee be 1e sthh\.w lr\[lr<<0x1\],lr:t,lr:t
++ *[0-9a-f]*: e3 ec b6 3d sthh\.w sp\[r6<<0x3\],r1:t,r12:t
++ *[0-9a-f]*: f3 e9 b6 06 sthh\.w r6\[r6\],r9:t,r9:t
++ *[0-9a-f]*: e1 eb 93 0a sthh\.w r10\[r3\],r0:b,r11:t
++
++[0-9a-f]* <sthh_w1>:
++ *[0-9a-f]*: ff ef c0 0f sthh\.w pc\[0x0\],pc:b,pc:b
++ *[0-9a-f]*: f9 ec ff fc sthh\.w r12\[0x3fc\],r12:t,r12:t
++ *[0-9a-f]*: eb e5 f8 05 sthh\.w r5\[0x200\],r5:t,r5:t
++ *[0-9a-f]*: e9 e4 c7 f4 sthh\.w r4\[0x1fc\],r4:b,r4:b
++ *[0-9a-f]*: fd ee f0 1e sthh\.w lr\[0x4\],lr:t,lr:t
++ *[0-9a-f]*: f3 e0 e6 54 sthh\.w r4\[0x194\],r9:t,r0:b
++ *[0-9a-f]*: e5 ea e5 78 sthh\.w r8\[0x15c\],r2:t,r10:b
++ *[0-9a-f]*: f3 e2 c2 bd sthh\.w sp\[0xac\],r9:b,r2:b
++
++[0-9a-f]* <cop>:
++ *[0-9a-f]*: e1 a0 00 00 cop cp0,cr0,cr0,cr0,0x0
++ *[0-9a-f]*: e7 af ff ff cop cp7,cr15,cr15,cr15,0x7f
++ *[0-9a-f]*: e3 a8 75 55 cop cp3,cr5,cr5,cr5,0x31
++ *[0-9a-f]*: e3 a8 44 44 cop cp2,cr4,cr4,cr4,0x30
++ *[0-9a-f]*: e5 ad a8 37 cop cp5,cr8,cr3,cr7,0x5a
++
++[0-9a-f]* <ldc_w1>:
++ *[0-9a-f]*: e9 a0 00 00 ldc\.w cp0,cr0,r0\[0x0\]
++ *[0-9a-f]*: e9 af ef ff ldc\.w cp7,cr15,pc\[0x3fc\]
++ *[0-9a-f]*: e9 a5 65 80 ldc\.w cp3,cr5,r5\[0x200\]
++ *[0-9a-f]*: e9 a4 44 7f ldc\.w cp2,cr4,r4\[0x1fc\]
++ *[0-9a-f]*: e9 ad 89 24 ldc\.w cp4,cr9,sp\[0x90\]
++
++[0-9a-f]* <ldc_w2>:
++ *[0-9a-f]*: ef a0 00 40 ldc\.w cp0,cr0,--r0
++ *[0-9a-f]*: ef af ef 40 ldc\.w cp7,cr15,--pc
++ *[0-9a-f]*: ef a5 65 40 ldc\.w cp3,cr5,--r5
++ *[0-9a-f]*: ef a4 44 40 ldc\.w cp2,cr4,--r4
++ *[0-9a-f]*: ef ad 89 40 ldc\.w cp4,cr9,--sp
++
++[0-9a-f]* <ldc_w3>:
++ *[0-9a-f]*: ef a0 10 00 ldc\.w cp0,cr0,r0\[r0\]
++ *[0-9a-f]*: ef af ff 3f ldc\.w cp7,cr15,pc\[pc<<0x3\]
++ *[0-9a-f]*: ef a5 75 24 ldc\.w cp3,cr5,r5\[r4<<0x2\]
++ *[0-9a-f]*: ef a4 54 13 ldc\.w cp2,cr4,r4\[r3<<0x1\]
++ *[0-9a-f]*: ef ad 99 0c ldc\.w cp4,cr9,sp\[r12\]
++
++[0-9a-f]* <ldc_d1>:
++ *[0-9a-f]*: e9 a0 10 00 ldc\.d cp0,cr0,r0\[0x0\]
++ *[0-9a-f]*: e9 af fe ff ldc\.d cp7,cr14,pc\[0x3fc\]
++ *[0-9a-f]*: e9 a5 76 80 ldc\.d cp3,cr6,r5\[0x200\]
++ *[0-9a-f]*: e9 a4 54 7f ldc\.d cp2,cr4,r4\[0x1fc\]
++ *[0-9a-f]*: e9 ad 98 24 ldc\.d cp4,cr8,sp\[0x90\]
++
++[0-9a-f]* <ldc_d2>:
++ *[0-9a-f]*: ef a0 00 50 ldc\.d cp0,cr0,--r0
++ *[0-9a-f]*: ef af ee 50 ldc\.d cp7,cr14,--pc
++ *[0-9a-f]*: ef a5 66 50 ldc\.d cp3,cr6,--r5
++ *[0-9a-f]*: ef a4 44 50 ldc\.d cp2,cr4,--r4
++ *[0-9a-f]*: ef ad 88 50 ldc\.d cp4,cr8,--sp
++
++[0-9a-f]* <ldc_d3>:
++ *[0-9a-f]*: ef a0 10 40 ldc\.d cp0,cr0,r0\[r0\]
++ *[0-9a-f]*: ef af fe 7f ldc\.d cp7,cr14,pc\[pc<<0x3\]
++ *[0-9a-f]*: ef a5 76 64 ldc\.d cp3,cr6,r5\[r4<<0x2\]
++ *[0-9a-f]*: ef a4 54 53 ldc\.d cp2,cr4,r4\[r3<<0x1\]
++ *[0-9a-f]*: ef ad 98 4c ldc\.d cp4,cr8,sp\[r12\]
++
++[0-9a-f]* <stc_w1>:
++ *[0-9a-f]*: eb a0 00 00 stc\.w cp0,r0\[0x0\],cr0
++ *[0-9a-f]*: eb af ef ff stc\.w cp7,pc\[0x3fc\],cr15
++ *[0-9a-f]*: eb a5 65 80 stc\.w cp3,r5\[0x200\],cr5
++ *[0-9a-f]*: eb a4 44 7f stc\.w cp2,r4\[0x1fc\],cr4
++ *[0-9a-f]*: eb ad 89 24 stc\.w cp4,sp\[0x90\],cr9
++
++[0-9a-f]* <stc_w2>:
++ *[0-9a-f]*: ef a0 00 60 stc\.w cp0,r0\+\+,cr0
++ *[0-9a-f]*: ef af ef 60 stc\.w cp7,pc\+\+,cr15
++ *[0-9a-f]*: ef a5 65 60 stc\.w cp3,r5\+\+,cr5
++ *[0-9a-f]*: ef a4 44 60 stc\.w cp2,r4\+\+,cr4
++ *[0-9a-f]*: ef ad 89 60 stc\.w cp4,sp\+\+,cr9
++
++[0-9a-f]* <stc_w3>:
++ *[0-9a-f]*: ef a0 10 80 stc\.w cp0,r0\[r0\],cr0
++ *[0-9a-f]*: ef af ff bf stc\.w cp7,pc\[pc<<0x3\],cr15
++ *[0-9a-f]*: ef a5 75 a4 stc\.w cp3,r5\[r4<<0x2\],cr5
++ *[0-9a-f]*: ef a4 54 93 stc\.w cp2,r4\[r3<<0x1\],cr4
++ *[0-9a-f]*: ef ad 99 8c stc\.w cp4,sp\[r12\],cr9
++
++[0-9a-f]* <stc_d1>:
++ *[0-9a-f]*: eb a0 10 00 stc\.d cp0,r0\[0x0\],cr0
++ *[0-9a-f]*: eb af fe ff stc\.d cp7,pc\[0x3fc\],cr14
++ *[0-9a-f]*: eb a5 76 80 stc\.d cp3,r5\[0x200\],cr6
++ *[0-9a-f]*: eb a4 54 7f stc\.d cp2,r4\[0x1fc\],cr4
++ *[0-9a-f]*: eb ad 98 24 stc\.d cp4,sp\[0x90\],cr8
++
++[0-9a-f]* <stc_d2>:
++ *[0-9a-f]*: ef a0 00 70 stc\.d cp0,r0\+\+,cr0
++ *[0-9a-f]*: ef af ee 70 stc\.d cp7,pc\+\+,cr14
++ *[0-9a-f]*: ef a5 66 70 stc\.d cp3,r5\+\+,cr6
++ *[0-9a-f]*: ef a4 44 70 stc\.d cp2,r4\+\+,cr4
++ *[0-9a-f]*: ef ad 88 70 stc\.d cp4,sp\+\+,cr8
++
++[0-9a-f]* <stc_d3>:
++ *[0-9a-f]*: ef a0 10 c0 stc\.d cp0,r0\[r0\],cr0
++ *[0-9a-f]*: ef af fe ff stc\.d cp7,pc\[pc<<0x3\],cr14
++ *[0-9a-f]*: ef a5 76 e4 stc\.d cp3,r5\[r4<<0x2\],cr6
++ *[0-9a-f]*: ef a4 54 d3 stc\.d cp2,r4\[r3<<0x1\],cr4
++ *[0-9a-f]*: ef ad 98 cc stc\.d cp4,sp\[r12\],cr8
++
++[0-9a-f]* <ldc0_w>:
++ *[0-9a-f]*: f1 a0 00 00 ldc0\.w cr0,r0\[0x0\]
++ *[0-9a-f]*: f1 af ff ff ldc0\.w cr15,pc\[0x3ffc\]
++ *[0-9a-f]*: f1 a5 85 00 ldc0\.w cr5,r5\[0x2000\]
++ *[0-9a-f]*: f1 a4 74 ff ldc0\.w cr4,r4\[0x1ffc\]
++ *[0-9a-f]*: f1 ad 09 93 ldc0\.w cr9,sp\[0x24c\]
++
++[0-9a-f]* <ldc0_d>:
++ *[0-9a-f]*: f3 a0 00 00 ldc0\.d cr0,r0\[0x0\]
++ *[0-9a-f]*: f3 af fe ff ldc0\.d cr14,pc\[0x3ffc\]
++ *[0-9a-f]*: f3 a5 86 00 ldc0\.d cr6,r5\[0x2000\]
++ *[0-9a-f]*: f3 a4 74 ff ldc0\.d cr4,r4\[0x1ffc\]
++ *[0-9a-f]*: f3 ad 08 93 ldc0\.d cr8,sp\[0x24c\]
++
++[0-9a-f]* <stc0_w>:
++ *[0-9a-f]*: f5 a0 00 00 stc0\.w r0\[0x0\],cr0
++ *[0-9a-f]*: f5 af ff ff stc0\.w pc\[0x3ffc\],cr15
++ *[0-9a-f]*: f5 a5 85 00 stc0\.w r5\[0x2000\],cr5
++ *[0-9a-f]*: f5 a4 74 ff stc0\.w r4\[0x1ffc\],cr4
++ *[0-9a-f]*: f5 ad 09 93 stc0\.w sp\[0x24c\],cr9
++
++[0-9a-f]* <stc0_d>:
++ *[0-9a-f]*: f7 a0 00 00 stc0\.d r0\[0x0\],cr0
++ *[0-9a-f]*: f7 af fe ff stc0\.d pc\[0x3ffc\],cr14
++ *[0-9a-f]*: f7 a5 86 00 stc0\.d r5\[0x2000\],cr6
++ *[0-9a-f]*: f7 a4 74 ff stc0\.d r4\[0x1ffc\],cr4
++ *[0-9a-f]*: f7 ad 08 93 stc0\.d sp\[0x24c\],cr8
++
++[0-9a-f]* <memc>:
++ *[0-9a-f]*: f6 10 00 00 memc 0,0x0
++ *[0-9a-f]*: f6 1f ff ff memc -4,0x1f
++ *[0-9a-f]*: f6 18 40 00 memc -65536,0x10
++ *[0-9a-f]*: f6 17 bf ff memc 65532,0xf
++
++[0-9a-f]* <mems>:
++ *[0-9a-f]*: f8 10 00 00 mems 0,0x0
++ *[0-9a-f]*: f8 1f ff ff mems -4,0x1f
++ *[0-9a-f]*: f8 18 40 00 mems -65536,0x10
++ *[0-9a-f]*: f8 17 bf ff mems 65532,0xf
++
++[0-9a-f]* <memt>:
++ *[0-9a-f]*: fa 10 00 00 memt 0,0x0
++ *[0-9a-f]*: fa 1f ff ff memt -4,0x1f
++ *[0-9a-f]*: fa 18 40 00 memt -65536,0x10
++ *[0-9a-f]*: fa 17 bf ff memt 65532,0xf
++
++[0-9a-f]* <stcond>:
++ *[0-9a-f]*: e1 70 00 00 stcond r0\[0\],r0
++ *[0-9a-f]*: ff 7f ff ff stcond pc\[-1\],pc
++ *[0-9a-f]*: f1 77 80 00 stcond r8\[-32768\],r7
++ *[0-9a-f]*: ef 78 7f ff stcond r7\[32767\],r8
++ *[0-9a-f]*: eb 7a 12 34 stcond r5\[4660\],r10
++
++[0-9a-f]* <ldcm_w>:
++ *[0-9a-f]*: ed af 00 ff ldcm\.w cp0,pc,cr0-cr7
++ *[0-9a-f]*: ed a0 e0 01 ldcm\.w cp7,r0,cr0
++ *[0-9a-f]*: ed a4 90 7f ldcm\.w cp4,r4\+\+,cr0-cr6
++ *[0-9a-f]*: ed a7 60 80 ldcm\.w cp3,r7,cr7
++ *[0-9a-f]*: ed ac 30 72 ldcm\.w cp1,r12\+\+,cr1,cr4-cr6
++ *[0-9a-f]*: ed af 01 ff ldcm\.w cp0,pc,cr8-cr15
++ *[0-9a-f]*: ed a0 e1 01 ldcm\.w cp7,r0,cr8
++ *[0-9a-f]*: ed a4 91 7f ldcm\.w cp4,r4\+\+,cr8-cr14
++ *[0-9a-f]*: ed a7 61 80 ldcm\.w cp3,r7,cr15
++ *[0-9a-f]*: ed ac 31 72 ldcm\.w cp1,r12\+\+,cr9,cr12-cr14
++
++[0-9a-f]* <ldcm_d>:
++ *[0-9a-f]*: ed af 04 ff ldcm\.d cp0,pc,cr0-cr15
++ *[0-9a-f]*: ed a0 e4 01 ldcm\.d cp7,r0,cr0-cr1
++ *[0-9a-f]*: ed a4 94 7f ldcm\.d cp4,r4\+\+,cr0-cr13
++ *[0-9a-f]*: ed a7 64 80 ldcm\.d cp3,r7,cr14-cr15
++ *[0-9a-f]*: ed ac 54 93 ldcm\.d cp2,r12\+\+,cr0-cr3,cr8-cr9,cr14-cr15
++
++[0-9a-f]* <stcm_w>:
++ *[0-9a-f]*: ed af 02 ff stcm\.w cp0,pc,cr0-cr7
++ *[0-9a-f]*: ed a0 e2 01 stcm\.w cp7,r0,cr0
++ *[0-9a-f]*: ed a4 92 7f stcm\.w cp4,--r4,cr0-cr6
++ *[0-9a-f]*: ed a7 62 80 stcm\.w cp3,r7,cr7
++ *[0-9a-f]*: ed ac 32 72 stcm\.w cp1,--r12,cr1,cr4-cr6
++ *[0-9a-f]*: ed af 03 ff stcm\.w cp0,pc,cr8-cr15
++ *[0-9a-f]*: ed a0 e3 01 stcm\.w cp7,r0,cr8
++ *[0-9a-f]*: ed a4 93 7f stcm\.w cp4,--r4,cr8-cr14
++ *[0-9a-f]*: ed a7 63 80 stcm\.w cp3,r7,cr15
++ *[0-9a-f]*: ed ac 33 72 stcm\.w cp1,--r12,cr9,cr12-cr14
++
++[0-9a-f]* <stcm_d>:
++ *[0-9a-f]*: ed af 05 ff stcm\.d cp0,pc,cr0-cr15
++ *[0-9a-f]*: ed a0 e5 01 stcm\.d cp7,r0,cr0-cr1
++ *[0-9a-f]*: ed a4 95 7f stcm\.d cp4,--r4,cr0-cr13
++ *[0-9a-f]*: ed a7 65 80 stcm\.d cp3,r7,cr14-cr15
++ *[0-9a-f]*: ed ac 55 93 stcm\.d cp2,--r12,cr0-cr3,cr8-cr9,cr14-cr15
++
++[0-9a-f]* <mvcr_w>:
++ *[0-9a-f]*: ef af ef 00 mvcr\.w cp7,pc,cr15
++ *[0-9a-f]*: ef a0 00 00 mvcr\.w cp0,r0,cr0
++ *[0-9a-f]*: ef af 0f 00 mvcr\.w cp0,pc,cr15
++ *[0-9a-f]*: ef a0 ef 00 mvcr\.w cp7,r0,cr15
++ *[0-9a-f]*: ef af e0 00 mvcr\.w cp7,pc,cr0
++ *[0-9a-f]*: ef a7 88 00 mvcr\.w cp4,r7,cr8
++ *[0-9a-f]*: ef a8 67 00 mvcr\.w cp3,r8,cr7
++
++[0-9a-f]* <mvcr_d>:
++ *[0-9a-f]*: ef ae ee 10 mvcr\.d cp7,lr,cr14
++ *[0-9a-f]*: ef a0 00 10 mvcr\.d cp0,r0,cr0
++ *[0-9a-f]*: ef ae 0e 10 mvcr\.d cp0,lr,cr14
++ *[0-9a-f]*: ef a0 ee 10 mvcr\.d cp7,r0,cr14
++ *[0-9a-f]*: ef ae e0 10 mvcr\.d cp7,lr,cr0
++ *[0-9a-f]*: ef a6 88 10 mvcr\.d cp4,r6,cr8
++ *[0-9a-f]*: ef a8 66 10 mvcr\.d cp3,r8,cr6
++
++[0-9a-f]* <mvrc_w>:
++ *[0-9a-f]*: ef af ef 20 mvrc\.w cp7,cr15,pc
++ *[0-9a-f]*: ef a0 00 20 mvrc\.w cp0,cr0,r0
++ *[0-9a-f]*: ef af 0f 20 mvrc\.w cp0,cr15,pc
++ *[0-9a-f]*: ef a0 ef 20 mvrc\.w cp7,cr15,r0
++ *[0-9a-f]*: ef af e0 20 mvrc\.w cp7,cr0,pc
++ *[0-9a-f]*: ef a7 88 20 mvrc\.w cp4,cr8,r7
++ *[0-9a-f]*: ef a8 67 20 mvrc\.w cp3,cr7,r8
++
++[0-9a-f]* <mvrc_d>:
++ *[0-9a-f]*: ef ae ee 30 mvrc\.d cp7,cr14,lr
++ *[0-9a-f]*: ef a0 00 30 mvrc\.d cp0,cr0,r0
++ *[0-9a-f]*: ef ae 0e 30 mvrc\.d cp0,cr14,lr
++ *[0-9a-f]*: ef a0 ee 30 mvrc\.d cp7,cr14,r0
++ *[0-9a-f]*: ef ae e0 30 mvrc\.d cp7,cr0,lr
++ *[0-9a-f]*: ef a6 88 30 mvrc\.d cp4,cr8,r6
++ *[0-9a-f]*: ef a8 66 30 mvrc\.d cp3,cr6,r8
++
++[0-9a-f]* <bfexts>:
++ *[0-9a-f]*: ff df b3 ff bfexts pc,pc,0x1f,0x1f
++ *[0-9a-f]*: e1 d0 b0 00 bfexts r0,r0,0x0,0x0
++ *[0-9a-f]*: e1 df b3 ff bfexts r0,pc,0x1f,0x1f
++ *[0-9a-f]*: ff d0 b3 ff bfexts pc,r0,0x1f,0x1f
++ *[0-9a-f]*: ff df b0 1f bfexts pc,pc,0x0,0x1f
++ *[0-9a-f]*: ff df b3 e0 bfexts pc,pc,0x1f,0x0
++ *[0-9a-f]*: ef d8 b1 f0 bfexts r7,r8,0xf,0x10
++ *[0-9a-f]*: f1 d7 b2 0f bfexts r8,r7,0x10,0xf
++
++[0-9a-f]* <bfextu>:
++ *[0-9a-f]*: ff df c3 ff bfextu pc,pc,0x1f,0x1f
++ *[0-9a-f]*: e1 d0 c0 00 bfextu r0,r0,0x0,0x0
++ *[0-9a-f]*: e1 df c3 ff bfextu r0,pc,0x1f,0x1f
++ *[0-9a-f]*: ff d0 c3 ff bfextu pc,r0,0x1f,0x1f
++ *[0-9a-f]*: ff df c0 1f bfextu pc,pc,0x0,0x1f
++ *[0-9a-f]*: ff df c3 e0 bfextu pc,pc,0x1f,0x0
++ *[0-9a-f]*: ef d8 c1 f0 bfextu r7,r8,0xf,0x10
++ *[0-9a-f]*: f1 d7 c2 0f bfextu r8,r7,0x10,0xf
++
++[0-9a-f]* <bfins>:
++ *[0-9a-f]*: ff df d3 ff bfins pc,pc,0x1f,0x1f
++ *[0-9a-f]*: e1 d0 d0 00 bfins r0,r0,0x0,0x0
++ *[0-9a-f]*: e1 df d3 ff bfins r0,pc,0x1f,0x1f
++ *[0-9a-f]*: ff d0 d3 ff bfins pc,r0,0x1f,0x1f
++ *[0-9a-f]*: ff df d0 1f bfins pc,pc,0x0,0x1f
++ *[0-9a-f]*: ff df d3 e0 bfins pc,pc,0x1f,0x0
++ *[0-9a-f]*: ef d8 d1 f0 bfins r7,r8,0xf,0x10
++ *[0-9a-f]*: f1 d7 d2 0f bfins r8,r7,0x10,0xf
++
++[0-9a-f]* <rsubc>:
++ *[0-9a-f]*: fb bf 00 00 rsubeq pc,0
++ *[0-9a-f]*: fb bc 0f ff rsubal r12,-1
++ *[0-9a-f]*: fb b5 08 80 rsubls r5,-128
++ *[0-9a-f]*: fb b4 07 7f rsubpl r4,127
++ *[0-9a-f]*: fb be 01 01 rsubne lr,1
++ *[0-9a-f]*: fb bc 08 76 rsubls r12,118
++ *[0-9a-f]*: fb be 0d f4 rsubvc lr,-12
++ *[0-9a-f]*: fb b4 06 f3 rsubmi r4,-13
++
++[0-9a-f]* <addc>:
++ *[0-9a-f]*: ff df e0 0f addeq pc,pc,pc
++ *[0-9a-f]*: f9 dc ef 0c addal r12,r12,r12
++ *[0-9a-f]*: eb d5 e8 05 addls r5,r5,r5
++ *[0-9a-f]*: e9 d4 e7 04 addpl r4,r4,r4
++ *[0-9a-f]*: fd de e1 0e addne lr,lr,lr
++ *[0-9a-f]*: e5 d1 e8 0a addls r10,r2,r1
++ *[0-9a-f]*: f1 db ed 0c addvc r12,r8,r11
++ *[0-9a-f]*: ef d0 e6 0a addmi r10,r7,r0
++
++[0-9a-f]* <subc2>:
++ *[0-9a-f]*: ff df e0 1f subeq pc,pc,pc
++ *[0-9a-f]*: f9 dc ef 1c subal r12,r12,r12
++ *[0-9a-f]*: eb d5 e8 15 subls r5,r5,r5
++ *[0-9a-f]*: e9 d4 e7 14 subpl r4,r4,r4
++ *[0-9a-f]*: fd de e1 1e subne lr,lr,lr
++ *[0-9a-f]*: e5 d1 e8 1a subls r10,r2,r1
++ *[0-9a-f]*: f1 db ed 1c subvc r12,r8,r11
++ *[0-9a-f]*: ef d0 e6 1a submi r10,r7,r0
++
++[0-9a-f]* <andc>:
++ *[0-9a-f]*: ff df e0 2f andeq pc,pc,pc
++ *[0-9a-f]*: f9 dc ef 2c andal r12,r12,r12
++ *[0-9a-f]*: eb d5 e8 25 andls r5,r5,r5
++ *[0-9a-f]*: e9 d4 e7 24 andpl r4,r4,r4
++ *[0-9a-f]*: fd de e1 2e andne lr,lr,lr
++ *[0-9a-f]*: e5 d1 e8 2a andls r10,r2,r1
++ *[0-9a-f]*: f1 db ed 2c andvc r12,r8,r11
++ *[0-9a-f]*: ef d0 e6 2a andmi r10,r7,r0
++
++[0-9a-f]* <orc>:
++ *[0-9a-f]*: ff df e0 3f oreq pc,pc,pc
++ *[0-9a-f]*: f9 dc ef 3c oral r12,r12,r12
++ *[0-9a-f]*: eb d5 e8 35 orls r5,r5,r5
++ *[0-9a-f]*: e9 d4 e7 34 orpl r4,r4,r4
++ *[0-9a-f]*: fd de e1 3e orne lr,lr,lr
++ *[0-9a-f]*: e5 d1 e8 3a orls r10,r2,r1
++ *[0-9a-f]*: f1 db ed 3c orvc r12,r8,r11
++ *[0-9a-f]*: ef d0 e6 3a ormi r10,r7,r0
++
++[0-9a-f]* <eorc>:
++ *[0-9a-f]*: ff df e0 4f eoreq pc,pc,pc
++ *[0-9a-f]*: f9 dc ef 4c eoral r12,r12,r12
++ *[0-9a-f]*: eb d5 e8 45 eorls r5,r5,r5
++ *[0-9a-f]*: e9 d4 e7 44 eorpl r4,r4,r4
++ *[0-9a-f]*: fd de e1 4e eorne lr,lr,lr
++ *[0-9a-f]*: e5 d1 e8 4a eorls r10,r2,r1
++ *[0-9a-f]*: f1 db ed 4c eorvc r12,r8,r11
++ *[0-9a-f]*: ef d0 e6 4a eormi r10,r7,r0
++
++[0-9a-f]* <ldcond>:
++ *[0-9a-f]*: ff ff 01 ff ld.weq pc,pc[0x7fc]
++ *[0-9a-f]*: f9 fc f3 ff ld.shal r12,r12[0x3fe]
++ *[0-9a-f]*: eb f5 84 00 ld.shls r5,r5[0x0]
++ *[0-9a-f]*: e9 f4 79 ff ld.ubpl r4,r4[0x1ff]
++ *[0-9a-f]*: fd fe 16 00 ld.sbne lr,lr[0x0]
++ *[0-9a-f]*: e5 fa 80 00 ld.wls r10,r2[0x0]
++ *[0-9a-f]*: f1 fc d3 ff ld.shvc r12,r8[0x3fe]
++ *[0-9a-f]*: ef fa 68 01 ld.ubmi r10,r7[0x1]
++
++[0-9a-f]* <stcond2>:
++ *[0-9a-f]*: ff ff 0b ff st.weq pc[0x7fc],pc
++ *[0-9a-f]*: f9 fc fd ff st.hal r12[0x3fe],r12
++ *[0-9a-f]*: eb f5 8c 00 st.hls r5[0x0],r5
++ *[0-9a-f]*: e9 f4 7f ff st.bpl r4[0x1ff],r4
++ *[0-9a-f]*: fd fe 1e 00 st.bne lr[0x0],lr
++ *[0-9a-f]*: e5 fa 8a 00 st.wls r2[0x0],r10
++ *[0-9a-f]*: f1 fc dd ff st.hvc r8[0x3fe],r12
++ *[0-9a-f]*: ef fa 6e 01 st.bmi r7[0x1],r10
++
++[0-9a-f]* <movh>:
++ *[0-9a-f]*: fc 1f ff ff movh pc,0xffff
++ *[0-9a-f]*: fc 10 00 00 movh r0,0x0
++ *[0-9a-f]*: fc 15 00 01 movh r5,0x1
++ *[0-9a-f]*: fc 1c 7f ff movh r12,0x7fff
++
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/allinsn.exp
+@@ -0,0 +1,5 @@
++# AVR32 assembler testsuite. -*- Tcl -*-
++
++if [istarget avr32-*-*] {
++ run_dump_test "allinsn"
++}
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/allinsn.s
+@@ -0,0 +1,3330 @@
++ .data
++foodata: .word 42
++ .text
++footext:
++ .text
++ .global ld_d5
++ld_d5:
++ ld.d lr,pc[pc<<3]
++ ld.d r0,r0[r0<<0]
++ ld.d r6,r5[r5<<2]
++ ld.d r4,r4[r4<<1]
++ ld.d lr,lr[lr<<1]
++ ld.d r10,r3[sp<<2]
++ ld.d r8,r10[r6<<2]
++ ld.d r2,r7[r9<<0]
++ .text
++ .global ld_w5
++ld_w5:
++ ld.w pc,pc[pc<<0]
++ ld.w r12,r12[r12<<3]
++ ld.w r5,r5[r5<<2]
++ ld.w r4,r4[r4<<1]
++ ld.w lr,lr[lr<<1]
++ ld.w r2,r9[r9<<0]
++ ld.w r11,r2[r6<<0]
++ ld.w r0,r2[sp<<3]
++ .text
++ .global ld_sh5
++ld_sh5:
++ ld.sh pc,pc[pc<<0]
++ ld.sh r12,r12[r12<<3]
++ ld.sh r5,r5[r5<<2]
++ ld.sh r4,r4[r4<<1]
++ ld.sh lr,lr[lr<<1]
++ ld.sh r11,r0[pc<<2]
++ ld.sh r10,sp[r6<<2]
++ ld.sh r12,r2[r2<<0]
++ .text
++ .global ld_uh5
++ld_uh5:
++ ld.uh pc,pc[pc<<0]
++ ld.uh r12,r12[r12<<3]
++ ld.uh r5,r5[r5<<2]
++ ld.uh r4,r4[r4<<1]
++ ld.uh lr,lr[lr<<1]
++ ld.uh r8,pc[lr<<3]
++ ld.uh r6,r1[pc<<1]
++ ld.uh r6,lr[sp<<1]
++ .text
++ .global ld_sb2
++ld_sb2:
++ ld.sb pc,pc[pc<<0]
++ ld.sb r12,r12[r12<<3]
++ ld.sb r5,r5[r5<<2]
++ ld.sb r4,r4[r4<<1]
++ ld.sb lr,lr[lr<<1]
++ ld.sb r9,r1[pc<<3]
++ ld.sb r0,r3[r11<<1]
++ ld.sb r10,r5[r5<<1]
++ .text
++ .global ld_ub5
++ld_ub5:
++ ld.ub pc,pc[pc<<0]
++ ld.ub r12,r12[r12<<3]
++ ld.ub r5,r5[r5<<2]
++ ld.ub r4,r4[r4<<1]
++ ld.ub lr,lr[lr<<1]
++ ld.ub r6,r12[r7<<3]
++ ld.ub r2,r6[r12<<0]
++ ld.ub r0,r7[r11<<1]
++ .text
++ .global st_d5
++st_d5:
++ st.d pc[pc<<0],r14
++ st.d r12[r12<<3],r12
++ st.d r5[r5<<2],r6
++ st.d r4[r4<<1],r4
++ st.d lr[lr<<1],lr
++ st.d r1[r9<<1],r4
++ st.d r10[r2<<1],r4
++ st.d r12[r6<<0],lr
++ .text
++ .global st_w5
++st_w5:
++ st.w pc[pc<<0],pc
++ st.w r12[r12<<3],r12
++ st.w r5[r5<<2],r5
++ st.w r4[r4<<1],r4
++ st.w lr[lr<<1],lr
++ st.w r1[r10<<0],r3
++ st.w r0[r10<<1],r9
++ st.w r4[r5<<3],pc
++ .text
++ .global st_h5
++st_h5:
++ st.h pc[pc<<0],pc
++ st.h r12[r12<<3],r12
++ st.h r5[r5<<2],r5
++ st.h r4[r4<<1],r4
++ st.h lr[lr<<1],lr
++ st.h r2[r9<<0],r11
++ st.h r5[r1<<2],r12
++ st.h pc[r8<<2],r3
++ .text
++ .global st_b5
++st_b5:
++ st.b pc[pc<<0],pc
++ st.b r12[r12<<3],r12
++ st.b r5[r5<<2],r5
++ st.b r4[r4<<1],r4
++ st.b lr[lr<<1],lr
++ st.b r1[r8<<1],r6
++ st.b lr[lr<<3],r1
++ st.b r5[r0<<2],pc
++ .text
++ .global divs
++divs:
++ divs pc,pc,pc
++ divs r12,r12,r12
++ divs r5,r5,r5
++ divs r4,r4,r4
++ divs lr,lr,lr
++ divs r3,pc,pc
++ divs r9,r12,r2
++ divs r7,r4,r1
++ .text
++ .global add1
++add1:
++ add pc,pc
++ add r12,r12
++ add r5,r5
++ add r4,r4
++ add lr,lr
++ add r12,r9
++ add r6,r3
++ add r10,r12
++ .text
++ .global sub1
++sub1:
++ sub pc,pc
++ sub r12,r12
++ sub r5,r5
++ sub r4,r4
++ sub lr,lr
++ sub lr,r6
++ sub r0,sp
++ sub r6,r12
++ .text
++ .global rsub1
++rsub1:
++ rsub pc,pc
++ rsub r12,r12
++ rsub r5,r5
++ rsub r4,r4
++ rsub lr,lr
++ rsub r11,sp
++ rsub r7,r4
++ rsub r9,r1
++ .text
++ .global cp1
++cp1:
++ cp pc,pc
++ cp r12,r12
++ cp r5,r5
++ cp r4,r4
++ cp lr,lr
++ cp r6,r2
++ cp r0,r9
++ cp r3,sp
++ .text
++ .global or1
++or1:
++ or pc,pc
++ or r12,r12
++ or r5,r5
++ or r4,r4
++ or lr,lr
++ or r4,r9
++ or r11,r4
++ or r4,r0
++ .text
++ .global eor1
++eor1:
++ eor pc,pc
++ eor r12,r12
++ eor r5,r5
++ eor r4,r4
++ eor lr,lr
++ eor r12,r11
++ eor r0,r1
++ eor r5,pc
++ .text
++ .global and1
++and1:
++ and pc,pc
++ and r12,r12
++ and r5,r5
++ and r4,r4
++ and lr,lr
++ and r8,r1
++ and r0,sp
++ and r10,r5
++ .text
++ .global tst
++tst:
++ tst pc,pc
++ tst r12,r12
++ tst r5,r5
++ tst r4,r4
++ tst lr,lr
++ tst r0,r12
++ tst r10,r6
++ tst sp,r4
++ .text
++ .global andn
++andn:
++ andn pc,pc
++ andn r12,r12
++ andn r5,r5
++ andn r4,r4
++ andn lr,lr
++ andn r9,r12
++ andn r11,sp
++ andn r12,r5
++ .text
++ .global mov3
++mov3:
++ mov pc,pc
++ mov r12,r12
++ mov r5,r5
++ mov r4,r4
++ mov lr,lr
++ mov r5,r9
++ mov r11,r11
++ mov r2,lr
++ .text
++ .global st_w1
++st_w1:
++ st.w pc++,pc
++ st.w r12++,r12
++ st.w r5++,r5
++ st.w r4++,r4
++ st.w lr++,lr
++ st.w r1++,r11
++ st.w sp++,r0
++ st.w sp++,r1
++ .text
++ .global st_h1
++st_h1:
++ st.h pc++,pc
++ st.h r12++,r12
++ st.h r5++,r5
++ st.h r4++,r4
++ st.h lr++,lr
++ st.h r12++,sp
++ st.h r7++,lr
++ st.h r7++,r4
++ .text
++ .global st_b1
++st_b1:
++ st.b pc++,pc
++ st.b r12++,r12
++ st.b r5++,r5
++ st.b r4++,r4
++ st.b lr++,lr
++ st.b r9++,sp
++ st.b r1++,sp
++ st.b r0++,r4
++ .text
++ .global st_w2
++st_w2:
++ st.w --pc,pc
++ st.w --r12,r12
++ st.w --r5,r5
++ st.w --r4,r4
++ st.w --lr,lr
++ st.w --r1,r7
++ st.w --r3,r9
++ st.w --r5,r5
++ .text
++ .global st_h2
++st_h2:
++ st.h --pc,pc
++ st.h --r12,r12
++ st.h --r5,r5
++ st.h --r4,r4
++ st.h --lr,lr
++ st.h --r5,r7
++ st.h --r8,r8
++ st.h --r7,r2
++ .text
++ .global st_b2
++st_b2:
++ st.b --pc,pc
++ st.b --r12,r12
++ st.b --r5,r5
++ st.b --r4,r4
++ st.b --lr,lr
++ st.b --sp,sp
++ st.b --sp,r11
++ st.b --r4,r5
++ .text
++ .global ld_w1
++ld_w1:
++ ld.w pc,pc++
++ ld.w r12,r12++
++ ld.w r5,r5++
++ ld.w r4,r4++
++ ld.w lr,lr++
++ ld.w r3,r7++
++ ld.w r3,lr++
++ ld.w r12,r5++
++ .text
++ .global ld_sh1
++ld_sh1:
++ ld.sh pc,pc++
++ ld.sh r12,r12++
++ ld.sh r5,r5++
++ ld.sh r4,r4++
++ ld.sh lr,lr++
++ ld.sh r11,r2++
++ ld.sh r2,r8++
++ ld.sh r7,r6++
++ .text
++ .global ld_uh1
++ld_uh1:
++ ld.uh pc,pc++
++ ld.uh r12,r12++
++ ld.uh r5,r5++
++ ld.uh r4,r4++
++ ld.uh lr,lr++
++ ld.uh r6,r7++
++ ld.uh r10,r11++
++ ld.uh lr,r4++
++ .text
++ .global ld_ub1
++ld_ub1:
++ ld.ub pc,pc++
++ ld.ub r12,r12++
++ ld.ub r5,r5++
++ ld.ub r4,r4++
++ ld.ub lr,lr++
++ ld.ub r8,lr++
++ ld.ub r12,r12++
++ ld.ub r11,r10++
++ .text
++ .global ld_w2
++ld_w2:
++ ld.w pc,--pc
++ ld.w r12,--r12
++ ld.w r5,--r5
++ ld.w r4,--r4
++ ld.w lr,--lr
++ ld.w r10,--lr
++ ld.w r12,--r9
++ ld.w r6,--r5
++ .text
++ .global ld_sh2
++ld_sh2:
++ ld.sh pc,--pc
++ ld.sh r12,--r12
++ ld.sh r5,--r5
++ ld.sh r4,--r4
++ ld.sh lr,--lr
++ ld.sh pc,--r10
++ ld.sh r6,--r3
++ ld.sh r4,--r6
++ .text
++ .global ld_uh2
++ld_uh2:
++ ld.uh pc,--pc
++ ld.uh r12,--r12
++ ld.uh r5,--r5
++ ld.uh r4,--r4
++ ld.uh lr,--lr
++ ld.uh r3,--r2
++ ld.uh r1,--r0
++ ld.uh r2,--r9
++ .text
++ .global ld_ub2
++ld_ub2:
++ ld.ub pc,--pc
++ ld.ub r12,--r12
++ ld.ub r5,--r5
++ ld.ub r4,--r4
++ ld.ub lr,--lr
++ ld.ub r1,--r1
++ ld.ub r0,--r6
++ ld.ub r2,--r7
++ .text
++ .global ld_ub3
++ld_ub3:
++ ld.ub pc,pc[0]
++ ld.ub r12,r12[7]
++ ld.ub r5,r5[4]
++ ld.ub r4,r4[3]
++ ld.ub lr,lr[1]
++ ld.ub r6,r9[6]
++ ld.ub r2,lr[4]
++ ld.ub r1,r8[0]
++ .text
++ .global sub3_sp
++sub3_sp:
++ sub sp,0
++ sub sp,-4
++ sub sp,-512
++ sub sp,508
++ sub sp,4
++ sub sp,44
++ sub sp,8
++ sub sp,348
++ .text
++ .global sub3
++sub3:
++ sub pc,0
++ sub r12,-1
++ sub r5,-128
++ sub r4,127
++ sub lr,1
++ sub r6,-41
++ sub r4,37
++ sub r12,56
++ .text
++ .global mov1
++mov1:
++ mov pc,0
++ mov r12,-1
++ mov r5,-128
++ mov r4,127
++ mov lr,1
++ mov pc,14
++ mov r6,-100
++ mov lr,-122
++ .text
++ .global lddsp
++lddsp:
++ lddsp pc,sp[0]
++ lddsp r12,sp[508]
++ lddsp r5,sp[256]
++ lddsp r4,sp[252]
++ lddsp lr,sp[4]
++ lddsp lr,sp[256]
++ lddsp r12,sp[20]
++ lddsp r9,sp[472]
++ .text
++ .global lddpc
++lddpc:
++ lddpc pc,pc[0]
++ lddpc r0,pc[508]
++ lddpc r8,pc[256]
++ lddpc r7,pc[252]
++ lddpc lr,pc[4]
++ lddpc sp,pc[472]
++ lddpc r6,pc[120]
++ lddpc r11,pc[28]
++ .text
++ .global stdsp
++stdsp:
++ stdsp sp[0],pc
++ stdsp sp[508],r12
++ stdsp sp[256],r5
++ stdsp sp[252],r4
++ stdsp sp[4],lr
++ stdsp sp[304],pc
++ stdsp sp[256],r0
++ stdsp sp[336],r5
++ .text
++ .global cp2
++cp2:
++ cp pc,0
++ cp r12,-1
++ cp r5,-32
++ cp r4,31
++ cp lr,1
++ cp r8,3
++ cp lr,16
++ cp r7,-26
++ .text
++ .global acr
++acr:
++ acr pc
++ acr r12
++ acr r5
++ acr r4
++ acr lr
++ acr r2
++ acr r12
++ acr pc
++ .text
++ .global scr
++scr:
++ scr pc
++ scr r12
++ scr r5
++ scr r4
++ scr lr
++ scr pc
++ scr r6
++ scr r1
++ .text
++ .global cpc0
++cpc0:
++ cpc pc
++ cpc r12
++ cpc r5
++ cpc r4
++ cpc lr
++ cpc pc
++ cpc r4
++ cpc r9
++ .text
++ .global neg
++neg:
++ neg pc
++ neg r12
++ neg r5
++ neg r4
++ neg lr
++ neg r7
++ neg r1
++ neg r9
++ .text
++ .global abs
++abs:
++ abs pc
++ abs r12
++ abs r5
++ abs r4
++ abs lr
++ abs r6
++ abs r6
++ abs r4
++ .text
++ .global castu_b
++castu_b:
++ castu.b pc
++ castu.b r12
++ castu.b r5
++ castu.b r4
++ castu.b lr
++ castu.b r7
++ castu.b sp
++ castu.b r9
++ .text
++ .global casts_b
++casts_b:
++ casts.b pc
++ casts.b r12
++ casts.b r5
++ casts.b r4
++ casts.b lr
++ casts.b r11
++ casts.b r1
++ casts.b r10
++ .text
++ .global castu_h
++castu_h:
++ castu.h pc
++ castu.h r12
++ castu.h r5
++ castu.h r4
++ castu.h lr
++ castu.h r10
++ castu.h r11
++ castu.h r1
++ .text
++ .global casts_h
++casts_h:
++ casts.h pc
++ casts.h r12
++ casts.h r5
++ casts.h r4
++ casts.h lr
++ casts.h r0
++ casts.h r5
++ casts.h r9
++ .text
++ .global brev
++brev:
++ brev pc
++ brev r12
++ brev r5
++ brev r4
++ brev lr
++ brev r5
++ brev r10
++ brev r8
++ .text
++ .global swap_h
++swap_h:
++ swap.h pc
++ swap.h r12
++ swap.h r5
++ swap.h r4
++ swap.h lr
++ swap.h r7
++ swap.h r0
++ swap.h r8
++ .text
++ .global swap_b
++swap_b:
++ swap.b pc
++ swap.b r12
++ swap.b r5
++ swap.b r4
++ swap.b lr
++ swap.b r10
++ swap.b r12
++ swap.b r1
++ .text
++ .global swap_bh
++swap_bh:
++ swap.bh pc
++ swap.bh r12
++ swap.bh r5
++ swap.bh r4
++ swap.bh lr
++ swap.bh r9
++ swap.bh r4
++ swap.bh r1
++ .text
++ .global One_s_compliment
++One_s_compliment:
++ com pc
++ com r12
++ com r5
++ com r4
++ com lr
++ com r2
++ com r2
++ com r7
++ .text
++ .global tnbz
++tnbz:
++ tnbz pc
++ tnbz r12
++ tnbz r5
++ tnbz r4
++ tnbz lr
++ tnbz r8
++ tnbz r12
++ tnbz pc
++ .text
++ .global rol
++rol:
++ rol pc
++ rol r12
++ rol r5
++ rol r4
++ rol lr
++ rol r10
++ rol r9
++ rol r5
++ .text
++ .global ror
++ror:
++ ror pc
++ ror r12
++ ror r5
++ ror r4
++ ror lr
++ ror r8
++ ror r4
++ ror r7
++ .text
++ .global icall
++icall:
++ icall pc
++ icall r12
++ icall r5
++ icall r4
++ icall lr
++ icall r3
++ icall r1
++ icall r3
++ .text
++ .global mustr
++mustr:
++ mustr pc
++ mustr r12
++ mustr r5
++ mustr r4
++ mustr lr
++ mustr r1
++ mustr r4
++ mustr r12
++ .text
++ .global musfr
++musfr:
++ musfr pc
++ musfr r12
++ musfr r5
++ musfr r4
++ musfr lr
++ musfr r11
++ musfr r12
++ musfr r2
++ .text
++ .global ret_cond
++ret_cond:
++ reteq pc
++ retal r12
++ retls r5
++ retpl r4
++ retne lr
++ retgt r0
++ retgt r12
++ retge r10
++ .text
++ .global sr_cond
++sr_cond:
++ sreq pc
++ sral r12
++ srls r5
++ srpl r4
++ srne lr
++ srlt r0
++ sral sp
++ srge r9
++ .text
++ .global ld_w3
++ld_w3:
++ ld.w pc,pc[0]
++ ld.w r12,r12[124]
++ ld.w r5,r5[64]
++ ld.w r4,r4[60]
++ ld.w lr,lr[4]
++ ld.w sp,r2[52]
++ ld.w r9,r1[8]
++ ld.w r5,sp[60]
++ .text
++ .global ld_sh3
++ld_sh3:
++ ld.sh pc,pc[0]
++ ld.sh r12,r12[14]
++ ld.sh r5,r5[8]
++ ld.sh r4,r4[6]
++ ld.sh lr,lr[2]
++ ld.sh r4,r2[8]
++ ld.sh sp,lr[10]
++ ld.sh r2,r11[2]
++ .text
++ .global ld_uh3
++ld_uh3:
++ ld.uh pc,pc[0]
++ ld.uh r12,r12[14]
++ ld.uh r5,r5[8]
++ ld.uh r4,r4[6]
++ ld.uh lr,lr[2]
++ ld.uh r10,r0[10]
++ ld.uh r8,r11[8]
++ ld.uh r10,r2[12]
++ .text
++ .global st_w3
++st_w3:
++ st.w pc[0],pc
++ st.w r12[60],r12
++ st.w r5[32],r5
++ st.w r4[28],r4
++ st.w lr[4],lr
++ st.w r7[44],r11
++ st.w r2[24],r6
++ st.w r4[12],r9
++ .text
++ .global st_h3
++st_h3:
++ st.h pc[0],pc
++ st.h r12[14],r12
++ st.h r5[8],r5
++ st.h r4[6],r4
++ st.h lr[2],lr
++ st.h lr[10],r12
++ st.h r6[4],r0
++ st.h r5[12],sp
++ .text
++ .global st_b3
++st_b3:
++ st.b pc[0],pc
++ st.b r12[7],r12
++ st.b r5[4],r5
++ st.b r4[3],r4
++ st.b lr[1],lr
++ st.b r12[6],r9
++ st.b r2[3],lr
++ st.b r1[3],r11
++ .text
++ .global ldd
++ldd:
++ ld.d r0,pc
++ ld.d r14,r12
++ ld.d r8,r5
++ ld.d r6,r4
++ ld.d r2,lr
++ ld.d r14,r7
++ ld.d r4,r4
++ ld.d r14,pc
++ .text
++ .global ldd_postinc
++ldd_postinc:
++ ld.d r0,pc++
++ ld.d r14,r12++
++ ld.d r8,r5++
++ ld.d r6,r4++
++ ld.d r2,lr++
++ ld.d r14,r5++
++ ld.d r12,r11++
++ ld.d r2,r12++
++ .text
++ .global ldd_predec
++ldd_predec:
++ ld.d r0,--pc
++ ld.d r14,--r12
++ ld.d r8,--r5
++ ld.d r6,--r4
++ ld.d r2,--lr
++ ld.d r8,--r0
++ ld.d r10,--pc
++ ld.d r2,--r4
++ .text
++ .global std
++std:
++ st.d pc,r0
++ st.d r12,r14
++ st.d r5,r8
++ st.d r4,r6
++ st.d lr,r2
++ st.d r0,r12
++ st.d sp,r4
++ st.d r12,r12
++ .text
++ .global std_postinc
++std_postinc:
++ st.d pc++,r0
++ st.d r12++,r14
++ st.d r5++,r8
++ st.d r4++,r6
++ st.d lr++,r2
++ st.d sp++,r6
++ st.d r10++,r6
++ st.d r7++,r2
++ .text
++ .global std_predec
++std_predec:
++ st.d --pc,r0
++ st.d --r12,r14
++ st.d --r5,r8
++ st.d --r4,r6
++ st.d --lr,r2
++ st.d --r3,r6
++ st.d --lr,r2
++ st.d --r0,r4
++ .text
++ .global mul
++mul:
++ mul pc,pc
++ mul r12,r12
++ mul r5,r5
++ mul r4,r4
++ mul lr,lr
++ mul r10,lr
++ mul r0,r8
++ mul r8,r5
++ .text
++ .global asr_imm5
++asr_imm5:
++ asr pc,0
++ asr r12,31
++ asr r5,16
++ asr r4,15
++ asr lr,1
++ asr r6,23
++ asr r6,18
++ asr r5,8
++ .text
++ .global lsl_imm5
++lsl_imm5:
++ lsl pc,0
++ lsl r12,31
++ lsl r5,16
++ lsl r4,15
++ lsl lr,1
++ lsl r12,13
++ lsl r6,16
++ lsl r1,25
++ .text
++ .global lsr_imm5
++lsr_imm5:
++ lsr pc,0
++ lsr r12,31
++ lsr r5,16
++ lsr r4,15
++ lsr lr,1
++ lsr r0,1
++ lsr r8,10
++ lsr r7,26
++ .text
++ .global sbr
++sbr:
++ sbr pc,0
++ sbr r12,31
++ sbr r5,16
++ sbr r4,15
++ sbr lr,1
++ sbr r8,31
++ sbr r6,22
++ sbr r1,23
++ .text
++ .global cbr
++cbr:
++ cbr pc,0
++ cbr r12,31
++ cbr r5,16
++ cbr r4,15
++ cbr lr,1
++ cbr r12,10
++ cbr r7,22
++ cbr r8,9
++ .text
++ .global brc1
++brc1:
++ breq 0
++ brpl -2
++ brge -256
++ brcs 254
++ brne 2
++ brcs 230
++ breq -18
++ breq 12
++ .text
++ .global rjmp
++rjmp:
++ rjmp 0
++ rjmp -2
++ rjmp -1024
++ rjmp 1022
++ rjmp 2
++ rjmp -962
++ rjmp 14
++ rjmp -516
++ .text
++ .global rcall1
++rcall1:
++ rcall 0
++ rcall -2
++ rcall -1024
++ rcall 1022
++ rcall 2
++ rcall 216
++ rcall -530
++ rcall -972
++ .text
++ .global acall
++acall:
++ acall 0
++ acall 1020
++ acall 512
++ acall 508
++ acall 4
++ acall 356
++ acall 304
++ acall 172
++ .text
++ .global scall
++scall:
++ scall
++ scall
++ scall
++ scall
++ scall
++ scall
++ scall
++ scall
++ .text
++ .global popm
++popm:
++ /* popm with no argument fails currently */
++ popm pc
++ popm r0-r11,pc,r12=-1
++ popm lr
++ popm r0-r11,pc,r12=1
++ popm r0-r3
++ popm r4-r10,pc
++ popm r0-r3,r11,pc,r12=0
++ popm r0-r7,r10-r12,lr
++ .text
++ .global pushm
++pushm:
++ pushm pc
++ pushm r0-r12,lr,pc
++ pushm pc
++ pushm r0-r12,lr
++ pushm r0-r3
++ pushm r8-r10,lr,pc
++ pushm r0-r3,r10
++ pushm r8-r9,r12
++ .text
++ .global popm_n
++popm_n:
++ popm pc
++ popm r0-r11,pc,r12=-1
++ popm lr
++ popm r0-r11,pc,r12=1
++ popm r0-r3
++ popm r4-r10,pc
++ popm r0-r3,r11,pc,r12=0
++ popm r0-r7,r10-r12,lr
++ .text
++ .global pushm_n
++pushm_n:
++ pushm pc
++ pushm r0-r12,lr,pc
++ pushm pc
++ pushm r0-r12,lr
++ pushm r0-r3
++ pushm r8-r10,lr,pc
++ pushm r0-r3,r10
++ pushm r8-r9,r12
++ .text
++ .global csrfcz
++csrfcz:
++ csrfcz 0
++ csrfcz 31
++ csrfcz 16
++ csrfcz 15
++ csrfcz 1
++ csrfcz 5
++ csrfcz 13
++ csrfcz 23
++ .text
++ .global ssrf
++ssrf:
++ ssrf 0
++ ssrf 31
++ ssrf 16
++ ssrf 15
++ ssrf 1
++ ssrf 29
++ ssrf 13
++ ssrf 13
++ .text
++ .global csrf
++csrf:
++ csrf 0
++ csrf 31
++ csrf 16
++ csrf 15
++ csrf 1
++ csrf 10
++ csrf 15
++ csrf 11
++ .text
++ .global rete
++rete:
++ rete
++ .text
++ .global rets
++rets:
++ rets
++ .text
++ .global retd
++retd:
++ retd
++ .text
++ .global retj
++retj:
++ retj
++ .text
++ .global tlbr
++tlbr:
++ tlbr
++ .text
++ .global tlbs
++tlbs:
++ tlbs
++ .text
++ .global tlbw
++tlbw:
++ tlbw
++ .text
++ .global breakpoint
++breakpoint:
++ breakpoint
++ .text
++ .global incjosp
++incjosp:
++ incjosp 1
++ incjosp 2
++ incjosp 3
++ incjosp 4
++ incjosp -4
++ incjosp -3
++ incjosp -2
++ incjosp -1
++ .text
++ .global nop
++nop:
++ nop
++ .text
++ .global popjc
++popjc:
++ popjc
++ .text
++ .global pushjc
++pushjc:
++ pushjc
++ .text
++ .global add2
++add2:
++ add pc,pc,pc<<0
++ add r12,r12,r12<<3
++ add r5,r5,r5<<2
++ add r4,r4,r4<<1
++ add lr,lr,lr<<1
++ add r0,r12,r0<<1
++ add r9,r12,r4<<0
++ add r12,r12,r7<<2
++ .text
++ .global sub2
++sub2:
++ sub pc,pc,pc<<0
++ sub r12,r12,r12<<3
++ sub r5,r5,r5<<2
++ sub r4,r4,r4<<1
++ sub lr,lr,lr<<1
++ sub sp,r3,r4<<0
++ sub r3,r7,r3<<0
++ sub sp,r10,sp<<1
++ .text
++ .global divu
++divu:
++ divu pc,pc,pc
++ divu r12,r12,r12
++ divu r5,r5,r5
++ divu r4,r4,r4
++ divu lr,lr,lr
++ divu sp,r4,pc
++ divu r5,r5,sp
++ divu r10,sp,r0
++ .text
++ .global addhh_w
++addhh_w:
++ addhh.w pc,pc:b,pc:b
++ addhh.w r12,r12:t,r12:t
++ addhh.w r5,r5:t,r5:t
++ addhh.w r4,r4:b,r4:b
++ addhh.w lr,lr:t,lr:t
++ addhh.w r0,r0:b,r3:b
++ addhh.w lr,r12:t,r7:b
++ addhh.w r3,r10:t,r2:b
++ .text
++ .global subhh_w
++subhh_w:
++ subhh.w pc,pc:b,pc:b
++ subhh.w r12,r12:t,r12:t
++ subhh.w r5,r5:t,r5:t
++ subhh.w r4,r4:b,r4:b
++ subhh.w lr,lr:t,lr:t
++ subhh.w r10,r1:t,r7:b
++ subhh.w pc,r10:t,lr:t
++ subhh.w r3,r0:t,r12:b
++ .text
++ .global adc
++adc:
++ adc pc,pc,pc
++ adc r12,r12,r12
++ adc r5,r5,r5
++ adc r4,r4,r4
++ adc lr,lr,lr
++ adc r4,r0,r7
++ adc sp,r4,r3
++ adc r2,r12,r0
++ .text
++ .global sbc
++sbc:
++ sbc pc,pc,pc
++ sbc r12,r12,r12
++ sbc r5,r5,r5
++ sbc r4,r4,r4
++ sbc lr,lr,lr
++ sbc r6,r7,r9
++ sbc r0,r8,r5
++ sbc r1,r0,r4
++ .text
++ .global mul_2
++mul_2:
++ mul pc,pc,pc
++ mul r12,r12,r12
++ mul r5,r5,r5
++ mul r4,r4,r4
++ mul lr,lr,lr
++ mul pc,r0,r0
++ mul r8,pc,lr
++ mul r4,r12,pc
++ .text
++ .global mac
++mac:
++ mac pc,pc,pc
++ mac r12,r12,r12
++ mac r5,r5,r5
++ mac r4,r4,r4
++ mac lr,lr,lr
++ mac r10,r4,r0
++ mac r7,lr,r0
++ mac r2,r9,r12
++ .text
++ .global mulsd
++mulsd:
++ muls.d pc,pc,pc
++ muls.d r12,r12,r12
++ muls.d r5,r5,r5
++ muls.d r4,r4,r4
++ muls.d lr,lr,lr
++ muls.d r2,r8,lr
++ muls.d r4,r0,r11
++ muls.d r5,lr,r6
++ .text
++ .global macsd
++macsd:
++ macs.d r0,pc,pc
++ macs.d r14,r12,r12
++ macs.d r8,r5,r5
++ macs.d r6,r4,r4
++ macs.d r2,lr,lr
++ macs.d r8,r1,r9
++ macs.d r14,r8,r8
++ macs.d r4,r3,r12
++ .text
++ .global mulud
++mulud:
++ mulu.d r0,pc,pc
++ mulu.d r14,r12,r12
++ mulu.d r8,r5,r5
++ mulu.d r6,r4,r4
++ mulu.d r2,lr,lr
++ mulu.d r6,r5,r0
++ mulu.d r4,r6,r1
++ mulu.d r8,r8,r2
++ .text
++ .global macud
++macud:
++ macu.d r0,pc,pc
++ macu.d r14,r12,r12
++ macu.d r8,r5,r5
++ macu.d r6,r4,r4
++ macu.d r2,lr,lr
++ macu.d r6,sp,r11
++ macu.d r2,r4,r8
++ macu.d r6,r10,r9
++ .text
++ .global asr_1
++asr_1:
++ asr pc,pc,pc
++ asr r12,r12,r12
++ asr r5,r5,r5
++ asr r4,r4,r4
++ asr lr,lr,lr
++ asr pc,r6,pc
++ asr r0,r6,r12
++ asr r4,sp,r0
++ .text
++ .global lsl_1
++lsl_1:
++ lsl pc,pc,pc
++ lsl r12,r12,r12
++ lsl r5,r5,r5
++ lsl r4,r4,r4
++ lsl lr,lr,lr
++ lsl lr,r5,lr
++ lsl r5,pc,r3
++ lsl r1,pc,r9
++ .text
++ .global lsr_1
++lsr_1:
++ lsr pc,pc,pc
++ lsr r12,r12,r12
++ lsr r5,r5,r5
++ lsr r4,r4,r4
++ lsr lr,lr,lr
++ lsr r2,r4,r1
++ lsr r5,r1,r6
++ lsr sp,r6,r7
++ .text
++ .global xchg
++xchg:
++ xchg pc,pc,pc
++ xchg r12,r12,r12
++ xchg r5,r5,r5
++ xchg r4,r4,r4
++ xchg lr,lr,lr
++ xchg lr,r4,sp
++ xchg r1,r5,r12
++ xchg lr,r12,r0
++ .text
++ .global max
++max:
++ max pc,pc,pc
++ max r12,r12,r12
++ max r5,r5,r5
++ max r4,r4,r4
++ max lr,lr,lr
++ max lr,r2,sp
++ max r4,r10,r9
++ max lr,r9,lr
++ .text
++ .global min
++min:
++ min pc,pc,pc
++ min r12,r12,r12
++ min r5,r5,r5
++ min r4,r4,r4
++ min lr,lr,lr
++ min r9,r7,r8
++ min sp,r5,r5
++ min r4,r1,r4
++ .text
++ .global addabs
++addabs:
++ addabs pc,pc,pc
++ addabs r12,r12,r12
++ addabs r5,r5,r5
++ addabs r4,r4,r4
++ addabs lr,lr,lr
++ addabs r7,r10,r0
++ addabs r9,r9,r7
++ addabs r2,r8,r12
++ .text
++ .global mulnhh_w
++mulnhh_w:
++ mulnhh.w pc,pc:b,pc:b
++ mulnhh.w r12,r12:t,r12:t
++ mulnhh.w r5,r5:t,r5:t
++ mulnhh.w r4,r4:b,r4:b
++ mulnhh.w lr,lr:t,lr:t
++ mulnhh.w r11,sp:t,r9:b
++ mulnhh.w sp,r4:b,lr:t
++ mulnhh.w r12,r2:t,r11:b
++ .text
++ .global mulnwh_d
++mulnwh_d:
++ mulnwh.d r0,pc,pc:b
++ mulnwh.d r14,r12,r12:t
++ mulnwh.d r8,r5,r5:t
++ mulnwh.d r6,r4,r4:b
++ mulnwh.d r2,lr,lr:t
++ mulnwh.d r14,r3,r2:t
++ mulnwh.d r4,r5,r9:b
++ mulnwh.d r12,r4,r4:t
++ .text
++ .global machh_w
++machh_w:
++ machh.w pc,pc:b,pc:b
++ machh.w r12,r12:t,r12:t
++ machh.w r5,r5:t,r5:t
++ machh.w r4,r4:b,r4:b
++ machh.w lr,lr:t,lr:t
++ machh.w lr,r5:b,r1:t
++ machh.w r9,r6:b,r7:b
++ machh.w r5,lr:t,r12:b
++ .text
++ .global machh_d
++machh_d:
++ machh.d r0,pc:b,pc:b
++ machh.d r14,r12:t,r12:t
++ machh.d r8,r5:t,r5:t
++ machh.d r6,r4:b,r4:b
++ machh.d r2,lr:t,lr:t
++ machh.d r10,r0:b,r8:b
++ machh.d r14,r4:b,r5:t
++ machh.d r8,r0:b,r4:t
++ .text
++ .global macsathh_w
++macsathh_w:
++ macsathh.w pc,pc:b,pc:b
++ macsathh.w r12,r12:t,r12:t
++ macsathh.w r5,r5:t,r5:t
++ macsathh.w r4,r4:b,r4:b
++ macsathh.w lr,lr:t,lr:t
++ macsathh.w r7,r7:t,pc:t
++ macsathh.w r4,r2:t,r4:b
++ macsathh.w r4,r8:t,r3:t
++ .text
++ .global mulhh_w
++mulhh_w:
++ mulhh.w pc,pc:b,pc:b
++ mulhh.w r12,r12:t,r12:t
++ mulhh.w r5,r5:t,r5:t
++ mulhh.w r4,r4:b,r4:b
++ mulhh.w lr,lr:t,lr:t
++ mulhh.w r7,r4:t,r9:b
++ mulhh.w pc,r3:t,r7:t
++ mulhh.w pc,r4:b,r9:t
++ .text
++ .global mulsathh_h
++mulsathh_h:
++ mulsathh.h pc,pc:b,pc:b
++ mulsathh.h r12,r12:t,r12:t
++ mulsathh.h r5,r5:t,r5:t
++ mulsathh.h r4,r4:b,r4:b
++ mulsathh.h lr,lr:t,lr:t
++ mulsathh.h r3,r1:b,sp:b
++ mulsathh.h r11,lr:t,r11:b
++ mulsathh.h r8,r8:b,r11:t
++ .text
++ .global mulsathh_w
++mulsathh_w:
++ mulsathh.w pc,pc:b,pc:b
++ mulsathh.w r12,r12:t,r12:t
++ mulsathh.w r5,r5:t,r5:t
++ mulsathh.w r4,r4:b,r4:b
++ mulsathh.w lr,lr:t,lr:t
++ mulsathh.w lr,r11:t,r6:b
++ mulsathh.w r6,r6:b,r7:t
++ mulsathh.w r10,r2:b,r3:b
++ .text
++ .global mulsatrndhh_h
++mulsatrndhh_h:
++ mulsatrndhh.h pc,pc:b,pc:b
++ mulsatrndhh.h r12,r12:t,r12:t
++ mulsatrndhh.h r5,r5:t,r5:t
++ mulsatrndhh.h r4,r4:b,r4:b
++ mulsatrndhh.h lr,lr:t,lr:t
++ mulsatrndhh.h r11,r6:b,r9:b
++ mulsatrndhh.h r11,r3:b,r8:t
++ mulsatrndhh.h r5,sp:t,r7:t
++ .text
++ .global mulsatrndwh_w
++mulsatrndwh_w:
++ mulsatrndwh.w pc,pc,pc:b
++ mulsatrndwh.w r12,r12,r12:t
++ mulsatrndwh.w r5,r5,r5:t
++ mulsatrndwh.w r4,r4,r4:b
++ mulsatrndwh.w lr,lr,lr:t
++ mulsatrndwh.w r5,r12,r0:b
++ mulsatrndwh.w r7,r10,pc:b
++ mulsatrndwh.w r10,r8,r5:t
++ .text
++ .global macwh_d
++macwh_d:
++ macwh.d r0,pc,pc:b
++ macwh.d r14,r12,r12:t
++ macwh.d r8,r5,r5:t
++ macwh.d r6,r4,r4:b
++ macwh.d r2,lr,lr:t
++ macwh.d r4,r10,r12:t
++ macwh.d r4,r7,sp:b
++ macwh.d r14,r9,r11:b
++ .text
++ .global mulwh_d
++mulwh_d:
++ mulwh.d r0,pc,pc:b
++ mulwh.d r14,r12,r12:t
++ mulwh.d r8,r5,r5:t
++ mulwh.d r6,r4,r4:b
++ mulwh.d r2,lr,lr:t
++ mulwh.d r12,r5,r1:b
++ mulwh.d r0,r1,r3:t
++ mulwh.d r0,r9,r2:b
++ .text
++ .global mulsatwh_w
++mulsatwh_w:
++ mulsatwh.w pc,pc,pc:b
++ mulsatwh.w r12,r12,r12:t
++ mulsatwh.w r5,r5,r5:t
++ mulsatwh.w r4,r4,r4:b
++ mulsatwh.w lr,lr,lr:t
++ mulsatwh.w r11,pc,r10:t
++ mulsatwh.w sp,r12,r9:t
++ mulsatwh.w r0,r3,r2:t
++ .text
++ .global ldw7
++ldw7:
++ ld.w pc,pc[pc:b<<2]
++ ld.w r12,r12[r12:t<<2]
++ ld.w r5,r5[r5:u<<2]
++ ld.w r4,r4[r4:l<<2]
++ ld.w lr,lr[lr:l<<2]
++ ld.w r9,r10[r6:l<<2]
++ ld.w r2,r10[r10:b<<2]
++ ld.w r11,r5[pc:b<<2]
++ .text
++ .global satadd_w
++satadd_w:
++ satadd.w pc,pc,pc
++ satadd.w r12,r12,r12
++ satadd.w r5,r5,r5
++ satadd.w r4,r4,r4
++ satadd.w lr,lr,lr
++ satadd.w r4,r8,r11
++ satadd.w r3,r12,r6
++ satadd.w r3,lr,r9
++ .text
++ .global satsub_w1
++satsub_w1:
++ satsub.w pc,pc,pc
++ satsub.w r12,r12,r12
++ satsub.w r5,r5,r5
++ satsub.w r4,r4,r4
++ satsub.w lr,lr,lr
++ satsub.w r8,sp,r0
++ satsub.w r9,r8,r4
++ satsub.w pc,lr,r2
++ .text
++ .global satadd_h
++satadd_h:
++ satadd.h pc,pc,pc
++ satadd.h r12,r12,r12
++ satadd.h r5,r5,r5
++ satadd.h r4,r4,r4
++ satadd.h lr,lr,lr
++ satadd.h r7,r3,r9
++ satadd.h r1,r0,r2
++ satadd.h r1,r4,lr
++ .text
++ .global satsub_h
++satsub_h:
++ satsub.h pc,pc,pc
++ satsub.h r12,r12,r12
++ satsub.h r5,r5,r5
++ satsub.h r4,r4,r4
++ satsub.h lr,lr,lr
++ satsub.h lr,lr,r3
++ satsub.h r11,r6,r5
++ satsub.h r3,sp,r0
++ .text
++ .global mul3
++mul3:
++ mul pc,pc,0
++ mul r12,r12,-1
++ mul r5,r5,-128
++ mul r4,r4,127
++ mul lr,lr,1
++ mul r12,r2,-7
++ mul r1,pc,95
++ mul r4,r6,19
++ .text
++ .global rsub2
++rsub2:
++ rsub pc,pc,0
++ rsub r12,r12,-1
++ rsub r5,r5,-128
++ rsub r4,r4,127
++ rsub lr,lr,1
++ rsub r9,lr,96
++ rsub r11,r1,56
++ rsub r0,r7,-87
++ .text
++ .global clz
++clz:
++ clz pc,pc
++ clz r12,r12
++ clz r5,r5
++ clz r4,r4
++ clz lr,lr
++ clz r2,r3
++ clz r5,r11
++ clz pc,r3
++ .text
++ .global cpc1
++cpc1:
++ cpc pc,pc
++ cpc r12,r12
++ cpc r5,r5
++ cpc r4,r4
++ cpc lr,lr
++ cpc pc,r4
++ cpc r5,r9
++ cpc r6,r7
++ .text
++ .global asr3
++asr3:
++ asr pc,pc,0
++ asr r12,r12,31
++ asr r5,r5,16
++ asr r4,r4,15
++ asr lr,lr,1
++ asr r4,r11,19
++ asr sp,pc,26
++ asr r11,sp,8
++ .text
++ .global lsl3
++lsl3:
++ lsl pc,pc,0
++ lsl r12,r12,31
++ lsl r5,r5,16
++ lsl r4,r4,15
++ lsl lr,lr,1
++ lsl r8,r10,17
++ lsl r2,lr,3
++ lsl lr,r11,14
++ .text
++ .global lsr3
++lsr3:
++ lsr pc,pc,0
++ lsr r12,r12,31
++ lsr r5,r5,16
++ lsr r4,r4,15
++ lsr lr,lr,1
++ lsr r4,r3,31
++ lsr pc,r9,14
++ lsr r3,r0,6
++/* .text
++ .global extract_b
++extract_b:
++ extract.b pc,pc:b
++ extract.b r12,r12:t
++ extract.b r5,r5:u
++ extract.b r4,r4:l
++ extract.b lr,lr:l
++ extract.b r2,r5:l
++ extract.b r12,r3:l
++ extract.b sp,r3:l
++ .text
++ .global insert_b
++insert_b:
++ insert.b pc:b,pc
++ insert.b r12:t,r12
++ insert.b r5:u,r5
++ insert.b r4:l,r4
++ insert.b lr:l,lr
++ insert.b r12:u,r3
++ insert.b r10:l,lr
++ insert.b r11:l,r12
++ .text
++ .global extract_h
++extract_h:
++ extract.h pc,pc:b
++ extract.h r12,r12:t
++ extract.h r5,r5:t
++ extract.h r4,r4:b
++ extract.h lr,lr:t
++ extract.h r11,lr:b
++ extract.h r10,r0:b
++ extract.h r11,r12:b
++ .text
++ .global insert_h
++insert_h:
++ insert.h pc:b,pc
++ insert.h r12:t,r12
++ insert.h r5:t,r5
++ insert.h r4:b,r4
++ insert.h lr:t,lr
++ insert.h r12:t,r11
++ insert.h r7:b,r6
++ insert.h r1:t,r11 */
++ .text
++ .global movc1
++movc1:
++ moveq pc,pc
++ moval r12,r12
++ movls r5,r5
++ movpl r4,r4
++ movne lr,lr
++ movne pc,r11
++ movmi r10,r2
++ movls r8,r12
++ .text
++ .global padd_h
++padd_h:
++ padd.h pc,pc,pc
++ padd.h r12,r12,r12
++ padd.h r5,r5,r5
++ padd.h r4,r4,r4
++ padd.h lr,lr,lr
++ padd.h r8,r2,r7
++ padd.h r0,r0,r3
++ padd.h sp,r11,r6
++ .text
++ .global psub_h
++psub_h:
++ psub.h pc,pc,pc
++ psub.h r12,r12,r12
++ psub.h r5,r5,r5
++ psub.h r4,r4,r4
++ psub.h lr,lr,lr
++ psub.h lr,r6,r8
++ psub.h r0,r1,sp
++ psub.h pc,pc,sp
++ .text
++ .global paddx_h
++paddx_h:
++ paddx.h pc,pc,pc
++ paddx.h r12,r12,r12
++ paddx.h r5,r5,r5
++ paddx.h r4,r4,r4
++ paddx.h lr,lr,lr
++ paddx.h pc,pc,r1
++ paddx.h r10,r4,r5
++ paddx.h r5,pc,r2
++ .text
++ .global psubx_h
++psubx_h:
++ psubx.h pc,pc,pc
++ psubx.h r12,r12,r12
++ psubx.h r5,r5,r5
++ psubx.h r4,r4,r4
++ psubx.h lr,lr,lr
++ psubx.h r5,r12,r5
++ psubx.h r3,r8,r3
++ psubx.h r5,r2,r3
++ .text
++ .global padds_sh
++padds_sh:
++ padds.sh pc,pc,pc
++ padds.sh r12,r12,r12
++ padds.sh r5,r5,r5
++ padds.sh r4,r4,r4
++ padds.sh lr,lr,lr
++ padds.sh r9,lr,r2
++ padds.sh r6,r8,r1
++ padds.sh r6,r4,r10
++ .text
++ .global psubs_sh
++psubs_sh:
++ psubs.sh pc,pc,pc
++ psubs.sh r12,r12,r12
++ psubs.sh r5,r5,r5
++ psubs.sh r4,r4,r4
++ psubs.sh lr,lr,lr
++ psubs.sh r6,lr,r11
++ psubs.sh r2,r12,r4
++ psubs.sh r0,r9,r0
++ .text
++ .global paddxs_sh
++paddxs_sh:
++ paddxs.sh pc,pc,pc
++ paddxs.sh r12,r12,r12
++ paddxs.sh r5,r5,r5
++ paddxs.sh r4,r4,r4
++ paddxs.sh lr,lr,lr
++ paddxs.sh r0,r3,r9
++ paddxs.sh pc,r10,r11
++ paddxs.sh pc,r10,pc
++ .text
++ .global psubxs_sh
++psubxs_sh:
++ psubxs.sh pc,pc,pc
++ psubxs.sh r12,r12,r12
++ psubxs.sh r5,r5,r5
++ psubxs.sh r4,r4,r4
++ psubxs.sh lr,lr,lr
++ psubxs.sh r7,r4,r4
++ psubxs.sh r7,r8,r3
++ psubxs.sh pc,r6,r5
++ .text
++ .global padds_uh
++padds_uh:
++ padds.uh pc,pc,pc
++ padds.uh r12,r12,r12
++ padds.uh r5,r5,r5
++ padds.uh r4,r4,r4
++ padds.uh lr,lr,lr
++ padds.uh r12,r11,r7
++ padds.uh r7,r8,lr
++ padds.uh r6,r9,r7
++ .text
++ .global psubs_uh
++psubs_uh:
++ psubs.uh pc,pc,pc
++ psubs.uh r12,r12,r12
++ psubs.uh r5,r5,r5
++ psubs.uh r4,r4,r4
++ psubs.uh lr,lr,lr
++ psubs.uh lr,r10,r6
++ psubs.uh sp,r2,pc
++ psubs.uh r2,r9,r2
++ .text
++ .global paddxs_uh
++paddxs_uh:
++ paddxs.uh pc,pc,pc
++ paddxs.uh r12,r12,r12
++ paddxs.uh r5,r5,r5
++ paddxs.uh r4,r4,r4
++ paddxs.uh lr,lr,lr
++ paddxs.uh r7,r9,r5
++ paddxs.uh r9,r1,r4
++ paddxs.uh r5,r2,r3
++ .text
++ .global psubxs_uh
++psubxs_uh:
++ psubxs.uh pc,pc,pc
++ psubxs.uh r12,r12,r12
++ psubxs.uh r5,r5,r5
++ psubxs.uh r4,r4,r4
++ psubxs.uh lr,lr,lr
++ psubxs.uh sp,r5,sp
++ psubxs.uh sp,r6,r6
++ psubxs.uh r3,r11,r8
++ .text
++ .global paddh_sh
++paddh_sh:
++ paddh.sh pc,pc,pc
++ paddh.sh r12,r12,r12
++ paddh.sh r5,r5,r5
++ paddh.sh r4,r4,r4
++ paddh.sh lr,lr,lr
++ paddh.sh r12,sp,r3
++ paddh.sh pc,r5,r3
++ paddh.sh r8,r8,sp
++ .text
++ .global psubh_sh
++psubh_sh:
++ psubh.sh pc,pc,pc
++ psubh.sh r12,r12,r12
++ psubh.sh r5,r5,r5
++ psubh.sh r4,r4,r4
++ psubh.sh lr,lr,lr
++ psubh.sh r1,r5,r8
++ psubh.sh r7,r3,r6
++ psubh.sh r4,r3,r3
++ .text
++ .global paddxh_sh
++paddxh_sh:
++ paddxh.sh pc,pc,pc
++ paddxh.sh r12,r12,r12
++ paddxh.sh r5,r5,r5
++ paddxh.sh r4,r4,r4
++ paddxh.sh lr,lr,lr
++ paddxh.sh r6,r0,r4
++ paddxh.sh r9,r8,r9
++ paddxh.sh r3,r0,sp
++ .text
++ .global psubxh_sh
++psubxh_sh:
++ psubxh.sh pc,pc,pc
++ psubxh.sh r12,r12,r12
++ psubxh.sh r5,r5,r5
++ psubxh.sh r4,r4,r4
++ psubxh.sh lr,lr,lr
++ psubxh.sh r4,pc,r12
++ psubxh.sh r8,r4,r6
++ psubxh.sh r12,r9,r4
++ .text
++ .global paddsub_h
++paddsub_h:
++ paddsub.h pc,pc:b,pc:b
++ paddsub.h r12,r12:t,r12:t
++ paddsub.h r5,r5:t,r5:t
++ paddsub.h r4,r4:b,r4:b
++ paddsub.h lr,lr:t,lr:t
++ paddsub.h r5,r2:t,lr:b
++ paddsub.h r7,r1:b,r8:b
++ paddsub.h r6,r10:t,r5:t
++ .text
++ .global psubadd_h
++psubadd_h:
++ psubadd.h pc,pc:b,pc:b
++ psubadd.h r12,r12:t,r12:t
++ psubadd.h r5,r5:t,r5:t
++ psubadd.h r4,r4:b,r4:b
++ psubadd.h lr,lr:t,lr:t
++ psubadd.h r9,r11:t,r8:t
++ psubadd.h r10,r7:t,lr:t
++ psubadd.h r6,pc:t,pc:b
++ .text
++ .global paddsubs_sh
++paddsubs_sh:
++ paddsubs.sh pc,pc:b,pc:b
++ paddsubs.sh r12,r12:t,r12:t
++ paddsubs.sh r5,r5:t,r5:t
++ paddsubs.sh r4,r4:b,r4:b
++ paddsubs.sh lr,lr:t,lr:t
++ paddsubs.sh r0,lr:t,r0:b
++ paddsubs.sh r9,r2:t,r4:t
++ paddsubs.sh r12,r9:t,sp:t
++ .text
++ .global psubadds_sh
++psubadds_sh:
++ psubadds.sh pc,pc:b,pc:b
++ psubadds.sh r12,r12:t,r12:t
++ psubadds.sh r5,r5:t,r5:t
++ psubadds.sh r4,r4:b,r4:b
++ psubadds.sh lr,lr:t,lr:t
++ psubadds.sh pc,lr:b,r1:t
++ psubadds.sh r11,r3:b,r12:b
++ psubadds.sh r10,r2:t,r8:t
++ .text
++ .global paddsubs_uh
++paddsubs_uh:
++ paddsubs.uh pc,pc:b,pc:b
++ paddsubs.uh r12,r12:t,r12:t
++ paddsubs.uh r5,r5:t,r5:t
++ paddsubs.uh r4,r4:b,r4:b
++ paddsubs.uh lr,lr:t,lr:t
++ paddsubs.uh r9,r2:b,r3:b
++ paddsubs.uh sp,sp:b,r7:t
++ paddsubs.uh lr,r0:b,r10:t
++ .text
++ .global psubadds_uh
++psubadds_uh:
++ psubadds.uh pc,pc:b,pc:b
++ psubadds.uh r12,r12:t,r12:t
++ psubadds.uh r5,r5:t,r5:t
++ psubadds.uh r4,r4:b,r4:b
++ psubadds.uh lr,lr:t,lr:t
++ psubadds.uh r12,r9:t,pc:t
++ psubadds.uh r8,r6:b,r8:b
++ psubadds.uh r8,r8:b,r4:b
++ .text
++ .global paddsubh_sh
++paddsubh_sh:
++ paddsubh.sh pc,pc:b,pc:b
++ paddsubh.sh r12,r12:t,r12:t
++ paddsubh.sh r5,r5:t,r5:t
++ paddsubh.sh r4,r4:b,r4:b
++ paddsubh.sh lr,lr:t,lr:t
++ paddsubh.sh r8,r9:t,r9:b
++ paddsubh.sh r0,sp:t,r1:t
++ paddsubh.sh r3,r1:b,r0:t
++ .text
++ .global psubaddh_sh
++psubaddh_sh:
++ psubaddh.sh pc,pc:b,pc:b
++ psubaddh.sh r12,r12:t,r12:t
++ psubaddh.sh r5,r5:t,r5:t
++ psubaddh.sh r4,r4:b,r4:b
++ psubaddh.sh lr,lr:t,lr:t
++ psubaddh.sh r7,r3:t,r10:b
++ psubaddh.sh r7,r2:t,r1:t
++ psubaddh.sh r11,r3:b,r6:b
++ .text
++ .global padd_b
++padd_b:
++ padd.b pc,pc,pc
++ padd.b r12,r12,r12
++ padd.b r5,r5,r5
++ padd.b r4,r4,r4
++ padd.b lr,lr,lr
++ padd.b r2,r6,pc
++ padd.b r8,r9,r12
++ padd.b r5,r12,r3
++ .text
++ .global psub_b
++psub_b:
++ psub.b pc,pc,pc
++ psub.b r12,r12,r12
++ psub.b r5,r5,r5
++ psub.b r4,r4,r4
++ psub.b lr,lr,lr
++ psub.b r0,r12,pc
++ psub.b r7,sp,r10
++ psub.b r5,sp,r12
++ .text
++ .global padds_sb
++padds_sb:
++ padds.sb pc,pc,pc
++ padds.sb r12,r12,r12
++ padds.sb r5,r5,r5
++ padds.sb r4,r4,r4
++ padds.sb lr,lr,lr
++ padds.sb sp,r11,r4
++ padds.sb r11,r10,r11
++ padds.sb r5,r12,r6
++ .text
++ .global psubs_sb
++psubs_sb:
++ psubs.sb pc,pc,pc
++ psubs.sb r12,r12,r12
++ psubs.sb r5,r5,r5
++ psubs.sb r4,r4,r4
++ psubs.sb lr,lr,lr
++ psubs.sb r7,r6,r8
++ psubs.sb r12,r10,r9
++ psubs.sb pc,r11,r0
++ .text
++ .global padds_ub
++padds_ub:
++ padds.ub pc,pc,pc
++ padds.ub r12,r12,r12
++ padds.ub r5,r5,r5
++ padds.ub r4,r4,r4
++ padds.ub lr,lr,lr
++ padds.ub r3,r2,r11
++ padds.ub r10,r8,r1
++ padds.ub r11,r8,r10
++ .text
++ .global psubs_ub
++psubs_ub:
++ psubs.ub pc,pc,pc
++ psubs.ub r12,r12,r12
++ psubs.ub r5,r5,r5
++ psubs.ub r4,r4,r4
++ psubs.ub lr,lr,lr
++ psubs.ub r0,r2,r7
++ psubs.ub lr,r5,r3
++ psubs.ub r6,r7,r9
++ .text
++ .global paddh_ub
++paddh_ub:
++ paddh.ub pc,pc,pc
++ paddh.ub r12,r12,r12
++ paddh.ub r5,r5,r5
++ paddh.ub r4,r4,r4
++ paddh.ub lr,lr,lr
++ paddh.ub lr,r1,r0
++ paddh.ub r2,r7,r7
++ paddh.ub r2,r1,r2
++ .text
++ .global psubh_ub
++psubh_ub:
++ psubh.ub pc,pc,pc
++ psubh.ub r12,r12,r12
++ psubh.ub r5,r5,r5
++ psubh.ub r4,r4,r4
++ psubh.ub lr,lr,lr
++ psubh.ub r0,r1,r6
++ psubh.ub r4,lr,r10
++ psubh.ub r9,r8,r1
++ .text
++ .global pmax_ub
++pmax_ub:
++ pmax.ub pc,pc,pc
++ pmax.ub r12,r12,r12
++ pmax.ub r5,r5,r5
++ pmax.ub r4,r4,r4
++ pmax.ub lr,lr,lr
++ pmax.ub pc,r2,r11
++ pmax.ub r12,r1,r1
++ pmax.ub r5,r2,r0
++ .text
++ .global pmax_sh
++pmax_sh:
++ pmax.sh pc,pc,pc
++ pmax.sh r12,r12,r12
++ pmax.sh r5,r5,r5
++ pmax.sh r4,r4,r4
++ pmax.sh lr,lr,lr
++ pmax.sh lr,r6,r12
++ pmax.sh r2,pc,r5
++ pmax.sh pc,r2,r7
++ .text
++ .global pmin_ub
++pmin_ub:
++ pmin.ub pc,pc,pc
++ pmin.ub r12,r12,r12
++ pmin.ub r5,r5,r5
++ pmin.ub r4,r4,r4
++ pmin.ub lr,lr,lr
++ pmin.ub r8,r1,r5
++ pmin.ub r1,r8,r3
++ pmin.ub r0,r2,r7
++ .text
++ .global pmin_sh
++pmin_sh:
++ pmin.sh pc,pc,pc
++ pmin.sh r12,r12,r12
++ pmin.sh r5,r5,r5
++ pmin.sh r4,r4,r4
++ pmin.sh lr,lr,lr
++ pmin.sh r8,r4,r10
++ pmin.sh lr,r10,r12
++ pmin.sh r2,r6,r2
++ .text
++ .global pavg_ub
++pavg_ub:
++ pavg.ub pc,pc,pc
++ pavg.ub r12,r12,r12
++ pavg.ub r5,r5,r5
++ pavg.ub r4,r4,r4
++ pavg.ub lr,lr,lr
++ pavg.ub r0,r1,r6
++ pavg.ub r8,r3,r6
++ pavg.ub pc,r12,r10
++ .text
++ .global pavg_sh
++pavg_sh:
++ pavg.sh pc,pc,pc
++ pavg.sh r12,r12,r12
++ pavg.sh r5,r5,r5
++ pavg.sh r4,r4,r4
++ pavg.sh lr,lr,lr
++ pavg.sh r9,pc,sp
++ pavg.sh pc,sp,r3
++ pavg.sh r6,r1,r9
++ .text
++ .global pabs_sb
++pabs_sb:
++ pabs.sb pc,pc
++ pabs.sb r12,r12
++ pabs.sb r5,r5
++ pabs.sb r4,r4
++ pabs.sb lr,lr
++ pabs.sb r11,r6
++ pabs.sb lr,r9
++ pabs.sb sp,r7
++ .text
++ .global pabs_sh
++pabs_sh:
++ pabs.sh pc,pc
++ pabs.sh r12,r12
++ pabs.sh r5,r5
++ pabs.sh r4,r4
++ pabs.sh lr,lr
++ pabs.sh pc,r3
++ pabs.sh r5,r7
++ pabs.sh r4,r0
++ .text
++ .global psad
++psad:
++ psad pc,pc,pc
++ psad r12,r12,r12
++ psad r5,r5,r5
++ psad r4,r4,r4
++ psad lr,lr,lr
++ psad r9,r11,r11
++ psad lr,r4,sp
++ psad lr,r4,r5
++ .text
++ .global pasr_b
++pasr_b:
++ pasr.b pc,pc,0
++ pasr.b r12,r12,7
++ pasr.b r5,r5,4
++ pasr.b r4,r4,3
++ pasr.b lr,lr,1
++ pasr.b pc,r7,1
++ pasr.b sp,lr,6
++ pasr.b sp,r3,2
++ .text
++ .global plsl_b
++plsl_b:
++ plsl.b pc,pc,0
++ plsl.b r12,r12,7
++ plsl.b r5,r5,4
++ plsl.b r4,r4,3
++ plsl.b lr,lr,1
++ plsl.b r2,r11,4
++ plsl.b r8,r5,7
++ plsl.b pc,r0,2
++ .text
++ .global plsr_b
++plsr_b:
++ plsr.b pc,pc,0
++ plsr.b r12,r12,7
++ plsr.b r5,r5,4
++ plsr.b r4,r4,3
++ plsr.b lr,lr,1
++ plsr.b r12,r1,2
++ plsr.b r6,pc,7
++ plsr.b r12,r11,2
++ .text
++ .global pasr_h
++pasr_h:
++ pasr.h pc,pc,0
++ pasr.h r12,r12,15
++ pasr.h r5,r5,8
++ pasr.h r4,r4,7
++ pasr.h lr,lr,1
++ pasr.h r0,r11,10
++ pasr.h r4,r6,8
++ pasr.h r6,r2,4
++ .text
++ .global plsl_h
++plsl_h:
++ plsl.h pc,pc,0
++ plsl.h r12,r12,15
++ plsl.h r5,r5,8
++ plsl.h r4,r4,7
++ plsl.h lr,lr,1
++ plsl.h r5,r10,9
++ plsl.h sp,lr,8
++ plsl.h r0,lr,7
++ .text
++ .global plsr_h
++plsr_h:
++ plsr.h pc,pc,0
++ plsr.h r12,r12,15
++ plsr.h r5,r5,8
++ plsr.h r4,r4,7
++ plsr.h lr,lr,1
++ plsr.h r11,r0,15
++ plsr.h lr,r3,3
++ plsr.h r8,lr,10
++ .text
++ .global packw_sh
++packw_sh:
++ packw.sh pc,pc,pc
++ packw.sh r12,r12,r12
++ packw.sh r5,r5,r5
++ packw.sh r4,r4,r4
++ packw.sh lr,lr,lr
++ packw.sh sp,r11,r10
++ packw.sh r8,r2,r12
++ packw.sh r8,r1,r5
++ .text
++ .global punpckub_h
++punpckub_h:
++ punpckub.h pc,pc:b
++ punpckub.h r12,r12:t
++ punpckub.h r5,r5:t
++ punpckub.h r4,r4:b
++ punpckub.h lr,lr:t
++ punpckub.h r6,r1:t
++ punpckub.h lr,r5:b
++ punpckub.h lr,r2:t
++ .text
++ .global punpcksb_h
++punpcksb_h:
++ punpcksb.h pc,pc:b
++ punpcksb.h r12,r12:t
++ punpcksb.h r5,r5:t
++ punpcksb.h r4,r4:b
++ punpcksb.h lr,lr:t
++ punpcksb.h r4,r7:t
++ punpcksb.h r6,lr:b
++ punpcksb.h r12,r12:t
++ .text
++ .global packsh_ub
++packsh_ub:
++ packsh.ub pc,pc,pc
++ packsh.ub r12,r12,r12
++ packsh.ub r5,r5,r5
++ packsh.ub r4,r4,r4
++ packsh.ub lr,lr,lr
++ packsh.ub r3,r6,r3
++ packsh.ub r8,r0,r3
++ packsh.ub r9,r3,lr
++ .text
++ .global packsh_sb
++packsh_sb:
++ packsh.sb pc,pc,pc
++ packsh.sb r12,r12,r12
++ packsh.sb r5,r5,r5
++ packsh.sb r4,r4,r4
++ packsh.sb lr,lr,lr
++ packsh.sb r6,r8,r1
++ packsh.sb lr,r9,r8
++ packsh.sb sp,r6,r6
++ .text
++ .global andl
++andl:
++ andl pc,0
++ andl r12,65535
++ andl r5,32768
++ andl r4,32767
++ andl lr,1
++ andl pc,23128
++ andl r8,47262
++ andl r7,13719
++ .text
++ .global andl_coh
++andl_coh:
++ andl pc,0,COH
++ andl r12,65535,COH
++ andl r5,32768,COH
++ andl r4,32767,COH
++ andl lr,1,COH
++ andl r6,22753,COH
++ andl r0,40653,COH
++ andl r4,48580,COH
++ .text
++ .global andh
++andh:
++ andh pc,0
++ andh r12,65535
++ andh r5,32768
++ andh r4,32767
++ andh lr,1
++ andh r12,52312
++ andh r3,8675
++ andh r2,42987
++ .text
++ .global andh_coh
++andh_coh:
++ andh pc,0,COH
++ andh r12,65535,COH
++ andh r5,32768,COH
++ andh r4,32767,COH
++ andh lr,1,COH
++ andh r11,34317,COH
++ andh r8,52982,COH
++ andh r10,23683,COH
++ .text
++ .global orl
++orl:
++ orl pc,0
++ orl r12,65535
++ orl r5,32768
++ orl r4,32767
++ orl lr,1
++ orl sp,16766
++ orl r0,21181
++ orl pc,44103
++ .text
++ .global orh
++orh:
++ orh pc,0
++ orh r12,65535
++ orh r5,32768
++ orh r4,32767
++ orh lr,1
++ orh r8,28285
++ orh r12,30492
++ orh r1,59930
++ .text
++ .global eorl
++eorl:
++ eorl pc,0
++ eorl r12,65535
++ eorl r5,32768
++ eorl r4,32767
++ eorl lr,1
++ eorl r4,51129
++ eorl r6,64477
++ eorl r1,20913
++ .text
++ .global eorh
++eorh:
++ eorh pc,0
++ eorh r12,65535
++ eorh r5,32768
++ eorh r4,32767
++ eorh lr,1
++ eorh r0,11732
++ eorh r10,38069
++ eorh r9,57130
++ .text
++ .global mcall
++mcall:
++ mcall pc[0]
++ mcall r12[-4]
++ mcall r5[-131072]
++ mcall r4[131068]
++ mcall lr[4]
++ mcall sp[61180]
++ mcall r4[-35000]
++ mcall r0[9924]
++ .text
++ .global pref
++pref:
++ pref pc[0]
++ pref r12[-1]
++ pref r5[-32768]
++ pref r4[32767]
++ pref lr[1]
++ pref r7[7748]
++ pref r7[-7699]
++ pref r2[-25892]
++ .text
++ .global cache
++cache:
++ cache pc[0],0
++ cache r12[-1],31
++ cache r5[-1024],16
++ cache r4[1023],15
++ cache lr[1],1
++ cache r3[-964],17
++ cache r4[-375],22
++ cache r3[-888],17
++ .text
++ .global sub4
++sub4:
++ sub pc,0
++ sub r12,-1
++ sub r5,-1048576
++ sub r4,1048575
++ sub lr,1
++ sub r2,-619156
++ sub lr,461517
++ sub r8,-185051
++ .text
++ .global cp3
++cp3:
++ cp pc,0
++ cp r12,-1
++ cp r5,-1048576
++ cp r4,1048575
++ cp lr,1
++ cp r1,124078
++ cp r0,-378909
++ cp r4,-243180
++ .text
++ .global mov2
++mov2:
++ mov pc,0
++ mov r12,-1
++ mov r5,-1048576
++ mov r4,1048575
++ mov lr,1
++ mov r5,-317021
++ mov sp,-749164
++ mov r5,940179
++ .text
++ .global brc2
++brc2:
++ breq 0
++ bral -2
++ brls -2097152
++ brpl 2097150
++ brne 2
++ brhi -1796966
++ brqs 1321368
++ brls -577434
++ .text
++ .global rcall2
++rcall2:
++ rcall 0
++ rcall -2
++ rcall -2097152
++ rcall 2097150
++ rcall 2
++ rcall 496820
++ rcall 1085092
++ rcall -1058
++ .text
++ .global sub5
++sub5:
++ sub pc,pc,0
++ sub r12,r12,-1
++ sub r5,r5,-32768
++ sub r4,r4,32767
++ sub lr,lr,1
++ sub pc,pc,-12744
++ sub r7,r7,-27365
++ sub r2,r9,-17358
++ .text
++ .global satsub_w2
++satsub_w2:
++ satsub.w pc,pc,0
++ satsub.w r12,r12,-1
++ satsub.w r5,r5,-32768
++ satsub.w r4,r4,32767
++ satsub.w lr,lr,1
++ satsub.w r2,lr,-2007
++ satsub.w r7,r12,-784
++ satsub.w r4,r7,23180
++ .text
++ .global ld_d4
++ld_d4:
++ ld.d r0,pc[0]
++ ld.d r14,r12[-1]
++ ld.d r8,r5[-32768]
++ ld.d r6,r4[32767]
++ ld.d r2,lr[1]
++ ld.d r14,r11[14784]
++ ld.d r6,r9[-18905]
++ ld.d r2,r3[-6355]
++ .text
++ .global ld_w4
++ld_w4:
++ ld.w pc,pc[0]
++ ld.w r12,r12[-1]
++ ld.w r5,r5[-32768]
++ ld.w r4,r4[32767]
++ ld.w lr,lr[1]
++ ld.w r0,r12[-22133]
++ ld.w sp,pc[-20521]
++ /* ld.w r3,r5[29035] */
++ nop
++ .text
++ .global ld_sh4
++ld_sh4:
++ ld.sh pc,pc[0]
++ ld.sh r12,r12[-1]
++ ld.sh r5,r5[-32768]
++ ld.sh r4,r4[32767]
++ ld.sh lr,lr[1]
++ ld.sh r6,r10[30930]
++ ld.sh r6,r10[21973]
++ /* ld.sh r11,r10[-2058] */
++ nop
++ .text
++ .global ld_uh4
++ld_uh4:
++ ld.uh pc,pc[0]
++ ld.uh r12,r12[-1]
++ ld.uh r5,r5[-32768]
++ ld.uh r4,r4[32767]
++ ld.uh lr,lr[1]
++ ld.uh r1,r9[-13354]
++ ld.uh lr,r11[21337]
++ /* ld.uh r2,lr[-25370] */
++ nop
++ .text
++ .global ld_sb1
++ld_sb1:
++ ld.sb pc,pc[0]
++ ld.sb r12,r12[-1]
++ ld.sb r5,r5[-32768]
++ ld.sb r4,r4[32767]
++ ld.sb lr,lr[1]
++ ld.sb r7,sp[-28663]
++ ld.sb r2,r1[-5879]
++ ld.sb r12,r3[18734]
++ .text
++ .global ld_ub4
++ld_ub4:
++ ld.ub pc,pc[0]
++ ld.ub r12,r12[-1]
++ ld.ub r5,r5[-32768]
++ ld.ub r4,r4[32767]
++ ld.ub lr,lr[1]
++ ld.ub pc,r4[8277]
++ ld.ub r5,r12[19172]
++ ld.ub r10,lr[26347]
++ .text
++ .global st_d4
++st_d4:
++ st.d pc[0],r0
++ st.d r12[-1],r14
++ st.d r5[-32768],r8
++ st.d r4[32767],r6
++ st.d lr[1],r2
++ st.d r5[13200],r10
++ st.d r5[9352],r10
++ st.d r5[32373],r4
++ .text
++ .global st_w4
++st_w4:
++ st.w pc[0],pc
++ st.w r12[-1],r12
++ st.w r5[-32768],r5
++ st.w r4[32767],r4
++ st.w lr[1],lr
++ st.w sp[6136],r7
++ st.w r6[27087],r12
++ /* st.w r3[20143],r7 */
++ nop
++ .text
++ .global st_h4
++st_h4:
++ st.h pc[0],pc
++ st.h r12[-1],r12
++ st.h r5[-32768],r5
++ st.h r4[32767],r4
++ st.h lr[1],lr
++ st.h r4[-9962],r7
++ st.h r9[-16250],r3
++ /* st.h r8[-28810],r7 */
++ nop
++ .text
++ .global st_b4
++st_b4:
++ st.b pc[0],pc
++ st.b r12[-1],r12
++ st.b r5[-32768],r5
++ st.b r4[32767],r4
++ st.b lr[1],lr
++ st.b r12[30102],r6
++ st.b r5[28977],r1
++ st.b r0[5470],r1
++ .text
++ .global mfsr
++mfsr:
++ mfsr pc,0
++ mfsr r12,1020
++ mfsr r5,512
++ mfsr r4,508
++ mfsr lr,4
++ mfsr r2,696
++ mfsr r4,260
++ mfsr r10,1016
++ .text
++ .global mtsr
++mtsr:
++ mtsr 0,pc
++ mtsr 1020,r12
++ mtsr 512,r5
++ mtsr 508,r4
++ mtsr 4,lr
++ mtsr 224,r10
++ mtsr 836,r12
++ mtsr 304,r9
++ .text
++ .global mfdr
++mfdr:
++ mfdr pc,0
++ mfdr r12,1020
++ mfdr r5,512
++ mfdr r4,508
++ mfdr lr,4
++ mfdr r6,932
++ mfdr r5,36
++ mfdr r9,300
++ .text
++ .global mtdr
++mtdr:
++ mtdr 0,pc
++ mtdr 1020,r12
++ mtdr 512,r5
++ mtdr 508,r4
++ mtdr 4,lr
++ mtdr 180,r8
++ mtdr 720,r10
++ mtdr 408,lr
++ .text
++ .global sleep
++sleep:
++ sleep 0
++ sleep 255
++ sleep 128
++ sleep 127
++ sleep 1
++ sleep 254
++ sleep 15
++ sleep 43
++ .text
++ .global sync
++sync:
++ sync 0
++ sync 255
++ sync 128
++ sync 127
++ sync 1
++ sync 166
++ sync 230
++ sync 180
++ .text
++ .global bld
++bld:
++ bld pc,0
++ bld r12,31
++ bld r5,16
++ bld r4,15
++ bld lr,1
++ bld r9,15
++ bld r0,4
++ bld lr,26
++ .text
++ .global bst
++bst:
++ bst pc,0
++ bst r12,31
++ bst r5,16
++ bst r4,15
++ bst lr,1
++ bst r10,28
++ bst r0,3
++ bst sp,2
++ .text
++ .global sats
++sats:
++ sats pc>>0,0
++ sats r12>>31,31
++ sats r5>>16,16
++ sats r4>>15,15
++ sats lr>>1,1
++ sats r10>>3,19
++ sats r10>>2,26
++ sats r1>>20,1
++ .text
++ .global satu
++satu:
++ satu pc>>0,0
++ satu r12>>31,31
++ satu r5>>16,16
++ satu r4>>15,15
++ satu lr>>1,1
++ satu pc>>5,7
++ satu r7>>5,5
++ satu r2>>26,19
++ .text
++ .global satrnds
++satrnds:
++ satrnds pc>>0,0
++ satrnds r12>>31,31
++ satrnds r5>>16,16
++ satrnds r4>>15,15
++ satrnds lr>>1,1
++ satrnds r0>>21,19
++ satrnds sp>>0,2
++ satrnds r7>>6,29
++ .text
++ .global satrndu
++satrndu:
++ satrndu pc>>0,0
++ satrndu r12>>31,31
++ satrndu r5>>16,16
++ satrndu r4>>15,15
++ satrndu lr>>1,1
++ satrndu r12>>0,26
++ satrndu r4>>21,3
++ satrndu r10>>3,16
++ .text
++ .global subfc
++subfc:
++ subfeq pc,0
++ subfal r12,-1
++ subfls r5,-128
++ subfpl r4,127
++ subfne lr,1
++ subfls r10,8
++ subfvc r11,99
++ subfvs r2,73
++ .text
++ .global subc
++subc:
++ subeq pc,0
++ subal r12,-1
++ subls r5,-128
++ subpl r4,127
++ subne lr,1
++ subls r12,118
++ subvc lr,-12
++ submi r4,-13
++ .text
++ .global movc2
++movc2:
++ moveq pc,0
++ moval r12,-1
++ movls r5,-128
++ movpl r4,127
++ movne lr,1
++ movlt r3,-122
++ movvc r8,2
++ movne r7,-111
++ .text
++ .global cp_b
++cp_b:
++ cp.b pc,r0
++ cp.b r0,pc
++ cp.b r7,r8
++ cp.b r8,r7
++ .text
++ .global cp_h
++cp_h:
++ cp.h pc,r0
++ cp.h r0,pc
++ cp.h r7,r8
++ cp.h r8,r7
++ .text
++ .global ldm
++ldm:
++ ldm pc,r1-r6
++ ldm r12,r0-r15
++ ldm r5,r15
++ ldm r4,r0-r14
++ ldm lr,r0
++ ldm r9,r1,r5,r14
++ ldm r11,r2-r3,r5-r8,r15
++ ldm r6,r0,r3,r9,r13,r15
++ .text
++ .global ldm_pu
++ldm_pu:
++ ldm pc++,r6-r9
++ ldm r12++,r0-r15
++ ldm r5++,r15
++ ldm r4++,r0-r14
++ ldm lr++,r0
++ ldm r12++,r3-r5,r8,r10,r12,r14-r15
++ ldm r10++,r2,r4-r6,r14-r15
++ ldm r6++,r1,r3-r4,r9-r14
++ .text
++ .global ldmts
++ldmts:
++ ldmts pc,r7-r8
++ ldmts r12,r0-r15
++ ldmts r5,r15
++ ldmts r4,r0-r14
++ ldmts lr,r0
++ ldmts r0,r1-r2,r11-r12
++ ldmts lr,r0-r2,r4,r7-r8,r13-r14
++ ldmts r12,r0-r1,r3-r5,r9,r14-r15
++ .text
++ .global ldmts_pu
++ldmts_pu:
++ ldmts pc++,r9
++ ldmts r12++,r0-r15
++ ldmts r5++,r15
++ ldmts r4++,r0-r14
++ ldmts lr++,r0
++ ldmts sp++,r0,r2-r5,r7,r9,r11
++ ldmts r5++,r1-r3,r7,r10-r11
++ ldmts r8++,r2-r4,r7-r8,r13,r15
++ .text
++ .global stm
++stm:
++ stm pc,r7
++ stm r12,r0-r15
++ stm r5,r15
++ stm r4,r0-r14
++ stm lr,r0
++ stm sp,r2-r3,r5,r8,r11,r14
++ stm r4,r0-r4,r6,r10-r11,r14
++ stm r9,r1,r5,r9,r12-r15
++ .text
++ .global stm_pu
++stm_pu:
++ stm --pc,r4-r6
++ stm --r12,r0-r15
++ stm --r5,r15
++ stm --r4,r0-r14
++ stm --lr,r0
++ stm --r11,r0,r4-r9,r11-r15
++ stm --r11,r0,r3,r9-r10,r12,r14
++ stm --r6,r2,r8-r9,r13-r14
++ .text
++ .global stmts
++stmts:
++ stmts pc,r8
++ stmts r12,r0-r15
++ stmts r5,r15
++ stmts r4,r0-r14
++ stmts lr,r0
++ stmts r1,r0-r1,r3-r4,r6,r9-r10,r14-r15
++ stmts r3,r0,r6-r8,r10-r12
++ stmts r11,r0,r4,r6-r7,r9-r10,r12,r14-r15
++ .text
++ .global stmts_pu
++stmts_pu:
++ stmts --pc,r6-r8
++ stmts --r12,r0-r15
++ stmts --r5,r15
++ stmts --r4,r0-r14
++ stmts --lr,r0
++ stmts --r2,r0,r3-r4,r9-r10,r12-r13
++ stmts --r3,r0-r1,r14-r15
++ stmts --r0,r0,r2-r6,r10,r14
++ .text
++ .global ldins_h
++ldins_h:
++ ldins.h pc:b,pc[0]
++ ldins.h r12:t,r12[-2]
++ ldins.h r5:t,r5[-4096]
++ ldins.h r4:b,r4[4094]
++ ldins.h lr:t,lr[2]
++ ldins.h r0:t,lr[1930]
++ ldins.h r3:b,r7[-534]
++ ldins.h r2:b,r12[-2252]
++ .text
++ .global ldins_b
++ldins_b:
++ ldins.b pc:b,pc[0]
++ ldins.b r12:t,r12[-1]
++ ldins.b r5:u,r5[-2048]
++ ldins.b r4:l,r4[2047]
++ ldins.b lr:l,lr[1]
++ ldins.b r6:t,r4[-662]
++ ldins.b r5:b,r1[-151]
++ ldins.b r10:t,r11[-1923]
++ .text
++ .global ldswp_sh
++ldswp_sh:
++ ldswp.sh pc,pc[0]
++ ldswp.sh r12,r12[-2]
++ ldswp.sh r5,r5[-4096]
++ ldswp.sh r4,r4[4094]
++ ldswp.sh lr,lr[2]
++ ldswp.sh r9,r10[3848]
++ ldswp.sh r4,r12[-2040]
++ ldswp.sh r10,r2[3088]
++ .text
++ .global ldswp_uh
++ldswp_uh:
++ ldswp.uh pc,pc[0]
++ ldswp.uh r12,r12[-2]
++ ldswp.uh r5,r5[-4096]
++ ldswp.uh r4,r4[4094]
++ ldswp.uh lr,lr[2]
++ ldswp.uh r4,r9[3724]
++ ldswp.uh lr,sp[-1672]
++ ldswp.uh r8,r12[-3846]
++ .text
++ .global ldswp_w
++ldswp_w:
++ ldswp.w pc,pc[0]
++ ldswp.w r12,r12[-4]
++ ldswp.w r5,r5[-8192]
++ ldswp.w r4,r4[8188]
++ ldswp.w lr,lr[4]
++ ldswp.w sp,r7[1860]
++ ldswp.w pc,r5[-3324]
++ ldswp.w r12,r10[-3296]
++ .text
++ .global stswp_h
++stswp_h:
++ stswp.h pc[0],pc
++ stswp.h r12[-2],r12
++ stswp.h r5[-4096],r5
++ stswp.h r4[4094],r4
++ stswp.h lr[2],lr
++ stswp.h r7[64],r10
++ stswp.h r10[3024],r2
++ stswp.h r0[-2328],r10
++ .text
++ .global stswp_w
++stswp_w:
++ stswp.w pc[0],pc
++ stswp.w r12[-4],r12
++ stswp.w r5[-8192],r5
++ stswp.w r4[8188],r4
++ stswp.w lr[4],lr
++ stswp.w pc[1156],r8
++ stswp.w sp[7992],r10
++ stswp.w r8[-1172],r5
++ .text
++ .global and2
++and2:
++ and pc,pc,pc<<0
++ and r12,r12,r12<<31
++ and r5,r5,r5<<16
++ and r4,r4,r4<<15
++ and lr,lr,lr<<1
++ and r10,r2,r1<<1
++ and r12,r8,r11<<27
++ and r10,r7,r0<<3
++ .text
++ .global and3
++and3:
++ and pc,pc,pc>>0
++ and r12,r12,r12>>31
++ and r5,r5,r5>>16
++ and r4,r4,r4>>15
++ and lr,lr,lr>>1
++ and r12,r8,r7>>17
++ and pc,r4,r9>>20
++ and r10,r9,r10>>12
++ .text
++ .global or2
++or2:
++ or pc,pc,pc<<0
++ or r12,r12,r12<<31
++ or r5,r5,r5<<16
++ or r4,r4,r4<<15
++ or lr,lr,lr<<1
++ or r8,sp,r11<<29
++ or pc,r9,r2<<28
++ or r5,r1,r2<<3
++ .text
++ .global or3
++or3:
++ or pc,pc,pc>>0
++ or r12,r12,r12>>31
++ or r5,r5,r5>>16
++ or r4,r4,r4>>15
++ or lr,lr,lr>>1
++ or r1,sp,sp>>2
++ or r0,r1,r1>>29
++ or r4,r12,r8>>8
++ .text
++ .global eor2
++eor2:
++ eor pc,pc,pc<<0
++ eor r12,r12,r12<<31
++ eor r5,r5,r5<<16
++ eor r4,r4,r4<<15
++ eor lr,lr,lr<<1
++ eor r10,r9,r4<<11
++ eor r4,r0,r1<<31
++ eor r6,r2,r12<<13
++ .text
++ .global eor3
++eor3:
++ eor pc,pc,pc>>0
++ eor r12,r12,r12>>31
++ eor r5,r5,r5>>16
++ eor r4,r4,r4>>15
++ eor lr,lr,lr>>1
++ eor r5,r5,r5>>22
++ eor r10,r1,lr>>3
++ eor r7,lr,sp>>26
++ .text
++ .global sthh_w2
++sthh_w2:
++ sthh.w pc[pc<<0],pc:b,pc:b
++ sthh.w r12[r12<<3],r12:t,r12:t
++ sthh.w r5[r5<<2],r5:t,r5:t
++ sthh.w r4[r4<<1],r4:b,r4:b
++ sthh.w lr[lr<<1],lr:t,lr:t
++ sthh.w sp[r6<<3],r1:t,r12:t
++ sthh.w r6[r6<<0],r9:t,r9:t
++ sthh.w r10[r3<<0],r0:b,r11:t
++ .text
++ .global sthh_w1
++sthh_w1:
++ sthh.w pc[0],pc:b,pc:b
++ sthh.w r12[1020],r12:t,r12:t
++ sthh.w r5[512],r5:t,r5:t
++ sthh.w r4[508],r4:b,r4:b
++ sthh.w lr[4],lr:t,lr:t
++ sthh.w r4[404],r9:t,r0:b
++ sthh.w r8[348],r2:t,r10:b
++ sthh.w sp[172],r9:b,r2:b
++ .text
++ .global cop
++cop:
++ cop cp0,cr0,cr0,cr0,0
++ cop cp7,cr15,cr15,cr15,0x7f
++ cop cp3,cr5,cr5,cr5,0x31
++ cop cp2,cr4,cr4,cr4,0x30
++ cop cp5,cr8,cr3,cr7,0x5a
++ .text
++ .global ldc_w1
++ldc_w1:
++ ldc.w cp0,cr0,r0[0]
++ ldc.w cp7,cr15,pc[255<<2]
++ ldc.w cp3,cr5,r5[128<<2]
++ ldc.w cp2,cr4,r4[127<<2]
++ ldc.w cp4,cr9,r13[36<<2]
++ .text
++ .global ldc_w2
++ldc_w2:
++ ldc.w cp0,cr0,--r0
++ ldc.w cp7,cr15,--pc
++ ldc.w cp3,cr5,--r5
++ ldc.w cp2,cr4,--r4
++ ldc.w cp4,cr9,--r13
++ .text
++ .global ldc_w3
++ldc_w3:
++ ldc.w cp0,cr0,r0[r0]
++ ldc.w cp7,cr15,pc[pc<<3]
++ ldc.w cp3,cr5,r5[r4<<2]
++ ldc.w cp2,cr4,r4[r3<<1]
++ ldc.w cp4,cr9,r13[r12<<0]
++ .text
++ .global ldc_d1
++ldc_d1:
++ ldc.d cp0,cr0,r0[0]
++ ldc.d cp7,cr14,pc[255<<2]
++ ldc.d cp3,cr6,r5[128<<2]
++ ldc.d cp2,cr4,r4[127<<2]
++ ldc.d cp4,cr8,r13[36<<2]
++ .text
++ .global ldc_d2
++ldc_d2:
++ ldc.d cp0,cr0,--r0
++ ldc.d cp7,cr14,--pc
++ ldc.d cp3,cr6,--r5
++ ldc.d cp2,cr4,--r4
++ ldc.d cp4,cr8,--r13
++ .text
++ .global ldc_d3
++ldc_d3:
++ ldc.d cp0,cr0,r0[r0]
++ ldc.d cp7,cr14,pc[pc<<3]
++ ldc.d cp3,cr6,r5[r4<<2]
++ ldc.d cp2,cr4,r4[r3<<1]
++ ldc.d cp4,cr8,r13[r12<<0]
++ .text
++ .global stc_w1
++stc_w1:
++ stc.w cp0,r0[0],cr0
++ stc.w cp7,pc[255<<2],cr15
++ stc.w cp3,r5[128<<2],cr5
++ stc.w cp2,r4[127<<2],cr4
++ stc.w cp4,r13[36<<2],cr9
++ .text
++ .global stc_w2
++stc_w2:
++ stc.w cp0,r0++,cr0
++ stc.w cp7,pc++,cr15
++ stc.w cp3,r5++,cr5
++ stc.w cp2,r4++,cr4
++ stc.w cp4,r13++,cr9
++ .text
++ .global stc_w3
++stc_w3:
++ stc.w cp0,r0[r0],cr0
++ stc.w cp7,pc[pc<<3],cr15
++ stc.w cp3,r5[r4<<2],cr5
++ stc.w cp2,r4[r3<<1],cr4
++ stc.w cp4,r13[r12<<0],cr9
++ .text
++ .global stc_d1
++stc_d1:
++ stc.d cp0,r0[0],cr0
++ stc.d cp7,pc[255<<2],cr14
++ stc.d cp3,r5[128<<2],cr6
++ stc.d cp2,r4[127<<2],cr4
++ stc.d cp4,r13[36<<2],cr8
++ .text
++ .global stc_d2
++stc_d2:
++ stc.d cp0,r0++,cr0
++ stc.d cp7,pc++,cr14
++ stc.d cp3,r5++,cr6
++ stc.d cp2,r4++,cr4
++ stc.d cp4,r13++,cr8
++ .text
++ .global stc_d3
++stc_d3:
++ stc.d cp0,r0[r0],cr0
++ stc.d cp7,pc[pc<<3],cr14
++ stc.d cp3,r5[r4<<2],cr6
++ stc.d cp2,r4[r3<<1],cr4
++ stc.d cp4,r13[r12<<0],cr8
++ .text
++ .global ldc0_w
++ldc0_w:
++ ldc0.w cr0,r0[0]
++ ldc0.w cr15,pc[4095<<2]
++ ldc0.w cr5,r5[2048<<2]
++ ldc0.w cr4,r4[2047<<2]
++ ldc0.w cr9,r13[147<<2]
++ .text
++ .global ldc0_d
++ldc0_d:
++ ldc0.d cr0,r0[0]
++ ldc0.d cr14,pc[4095<<2]
++ ldc0.d cr6,r5[2048<<2]
++ ldc0.d cr4,r4[2047<<2]
++ ldc0.d cr8,r13[147<<2]
++ .text
++ .global stc0_w
++stc0_w:
++ stc0.w r0[0],cr0
++ stc0.w pc[4095<<2],cr15
++ stc0.w r5[2048<<2],cr5
++ stc0.w r4[2047<<2],cr4
++ stc0.w r13[147<<2],cr9
++ .text
++ .global stc0_d
++stc0_d:
++ stc0.d r0[0],cr0
++ stc0.d pc[4095<<2],cr14
++ stc0.d r5[2048<<2],cr6
++ stc0.d r4[2047<<2],cr4
++ stc0.d r13[147<<2],cr8
++ .text
++ .global memc
++memc:
++ memc 0, 0
++ memc -4, 31
++ memc -65536, 16
++ memc 65532, 15
++ .text
++ .global mems
++mems:
++ mems 0, 0
++ mems -4, 31
++ mems -65536, 16
++ mems 65532, 15
++ .text
++ .global memt
++memt:
++ memt 0, 0
++ memt -4, 31
++ memt -65536, 16
++ memt 65532, 15
++
++ .text
++ .global stcond
++stcond:
++ stcond r0[0], r0
++ stcond pc[-1], pc
++ stcond r8[-32768], r7
++ stcond r7[32767], r8
++ stcond r5[0x1234], r10
++
++ldcm_w:
++ ldcm.w cp0,pc,cr0-cr7
++ ldcm.w cp7,r0,cr0
++ ldcm.w cp4,r4++,cr0-cr6
++ ldcm.w cp3,r7,cr7
++ ldcm.w cp1,r12++,cr1,cr4-cr6
++ ldcm.w cp0,pc,cr8-cr15
++ ldcm.w cp7,r0,cr8
++ ldcm.w cp4,r4++,cr8-cr14
++ ldcm.w cp3,r7,cr15
++ ldcm.w cp1,r12++,cr9,cr12-cr14
++
++ldcm_d:
++ ldcm.d cp0,pc,cr0-cr15
++ ldcm.d cp7,r0,cr0,cr1
++ ldcm.d cp4,r4++,cr0-cr13
++ ldcm.d cp3,r7,cr14-cr15
++ ldcm.d cp2,r12++,cr0-cr3,cr8-cr9,cr14-cr15
++
++stcm_w:
++ stcm.w cp0,pc,cr0-cr7
++ stcm.w cp7,r0,cr0
++ stcm.w cp4,--r4,cr0-cr6
++ stcm.w cp3,r7,cr7
++ stcm.w cp1,--r12,cr1,cr4-cr6
++ stcm.w cp0,pc,cr8-cr15
++ stcm.w cp7,r0,cr8
++ stcm.w cp4,--r4,cr8-cr14
++ stcm.w cp3,r7,cr15
++ stcm.w cp1,--r12,cr9,cr12-cr14
++
++stcm_d:
++ stcm.d cp0,pc,cr0-cr15
++ stcm.d cp7,r0,cr0,cr1
++ stcm.d cp4,--r4,cr0-cr13
++ stcm.d cp3,r7,cr14-cr15
++ stcm.d cp2,--r12,cr0-cr3,cr8-cr9,cr14-cr15
++
++mvcr_w:
++ mvcr.w cp7,pc,cr15
++ mvcr.w cp0,r0,cr0
++ mvcr.w cp0,pc,cr15
++ mvcr.w cp7,r0,cr15
++ mvcr.w cp7,pc,cr0
++ mvcr.w cp4,r7,cr8
++ mvcr.w cp3,r8,cr7
++
++mvcr_d:
++ mvcr.d cp7,lr,cr14
++ mvcr.d cp0,r0,cr0
++ mvcr.d cp0,lr,cr14
++ mvcr.d cp7,r0,cr14
++ mvcr.d cp7,lr,cr0
++ mvcr.d cp4,r6,cr8
++ mvcr.d cp3,r8,cr6
++
++mvrc_w:
++ mvrc.w cp7,cr15,pc
++ mvrc.w cp0,cr0,r0
++ mvrc.w cp0,cr15,pc
++ mvrc.w cp7,cr15,r0
++ mvrc.w cp7,cr0,pc
++ mvrc.w cp4,cr8,r7
++ mvrc.w cp3,cr7,r8
++
++mvrc_d:
++ mvrc.d cp7,cr14,lr
++ mvrc.d cp0,cr0,r0
++ mvrc.d cp0,cr14,lr
++ mvrc.d cp7,cr14,r0
++ mvrc.d cp7,cr0,lr
++ mvrc.d cp4,cr8,r6
++ mvrc.d cp3,cr6,r8
++
++bfexts:
++ bfexts pc,pc,31,31
++ bfexts r0,r0,0,0
++ bfexts r0,pc,31,31
++ bfexts pc,r0,31,31
++ bfexts pc,pc,0,31
++ bfexts pc,pc,31,0
++ bfexts r7,r8,15,16
++ bfexts r8,r7,16,15
++
++bfextu:
++ bfextu pc,pc,31,31
++ bfextu r0,r0,0,0
++ bfextu r0,pc,31,31
++ bfextu pc,r0,31,31
++ bfextu pc,pc,0,31
++ bfextu pc,pc,31,0
++ bfextu r7,r8,15,16
++ bfextu r8,r7,16,15
++
++bfins:
++ bfins pc,pc,31,31
++ bfins r0,r0,0,0
++ bfins r0,pc,31,31
++ bfins pc,r0,31,31
++ bfins pc,pc,0,31
++ bfins pc,pc,31,0
++ bfins r7,r8,15,16
++ bfins r8,r7,16,15
++
++rsubc:
++ rsubeq pc,0
++ rsubal r12,-1
++ rsubls r5,-128
++ rsubpl r4,127
++ rsubne lr,1
++ rsubls r12,118
++ rsubvc lr,-12
++ rsubmi r4,-13
++
++addc:
++ addeq pc,pc,pc
++ addal r12,r12,r12
++ addls r5,r5,r5
++ addpl r4,r4,r4
++ addne lr,lr,lr
++ addls r10,r2,r1
++ addvc r12,r8,r11
++ addmi r10,r7,r0
++
++subc2:
++ subeq pc,pc,pc
++ subal r12,r12,r12
++ subls r5,r5,r5
++ subpl r4,r4,r4
++ subne lr,lr,lr
++ subls r10,r2,r1
++ subvc r12,r8,r11
++ submi r10,r7,r0
++
++andc:
++ andeq pc,pc,pc
++ andal r12,r12,r12
++ andls r5,r5,r5
++ andpl r4,r4,r4
++ andne lr,lr,lr
++ andls r10,r2,r1
++ andvc r12,r8,r11
++ andmi r10,r7,r0
++
++orc:
++ oreq pc,pc,pc
++ oral r12,r12,r12
++ orls r5,r5,r5
++ orpl r4,r4,r4
++ orne lr,lr,lr
++ orls r10,r2,r1
++ orvc r12,r8,r11
++ ormi r10,r7,r0
++
++eorc:
++ eoreq pc,pc,pc
++ eoral r12,r12,r12
++ eorls r5,r5,r5
++ eorpl r4,r4,r4
++ eorne lr,lr,lr
++ eorls r10,r2,r1
++ eorvc r12,r8,r11
++ eormi r10,r7,r0
++
++ldcond:
++ ld.weq pc,pc[2044]
++ ld.shal r12,r12[1022]
++ ld.uhls r5,r5[0]
++ ld.ubpl r4,r4[511]
++ ld.sbne lr,lr[0]
++ ld.wls r10,r2[0]
++ ld.shvc r12,r8[0x3fe]
++ ld.ubmi r10,r7[1]
++
++stcond2:
++ st.weq pc[2044],pc
++ st.hal r12[1022],r12
++ st.hls r5[0],r5
++ st.bpl r4[511],r4
++ st.bne lr[0],lr
++ st.wls r2[0],r10
++ st.hvc r8[0x3fe],r12
++ st.bmi r7[1],r10
++
++movh:
++ movh pc, 65535
++ movh r0, 0
++ movh r5, 1
++ movh r12, 32767
++
++
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/avr32.exp
+@@ -0,0 +1,23 @@
++# AVR32 assembler testsuite. -*- Tcl -*-
++
++if [istarget avr32-*-*] {
++ run_dump_test "hwrd-lwrd"
++ run_dump_test "pcrel"
++ run_dump_test "aliases"
++ run_dump_test "dwarf2"
++ run_dump_test "pic_reloc"
++ run_dump_test "fpinsn"
++ run_dump_test "pico"
++ run_dump_test "lda_pic"
++ run_dump_test "lda_pic_linkrelax"
++ run_dump_test "lda_nopic"
++ run_dump_test "lda_nopic_linkrelax"
++ run_dump_test "call_pic"
++ run_dump_test "call_pic_linkrelax"
++ run_dump_test "call_nopic"
++ run_dump_test "call_nopic_linkrelax"
++ run_dump_test "jmptable"
++ run_dump_test "jmptable_linkrelax"
++ run_dump_test "symdiff"
++ run_dump_test "symdiff_linkrelax"
++}
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/call_nopic.d
+@@ -0,0 +1,36 @@
++#source: call.s
++#as:
++#objdump: -dr
++#name: call_nopic
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <call_test>:
++ 0: d7 03 nop
++
++00000002 <toofar_negative>:
++ \.\.\.
++ 1ffffe: 00 00 add r0,r0
++ 200000: f0 a0 00 00 rcall 0 <call_test>
++ 200004: f0 1f 00 0c mcall 200034 <toofar_negative\+0x200032>
++ 200008: f0 1f 00 0c mcall 200038 <toofar_negative\+0x200036>
++ 20000c: f0 1f 00 0c mcall 20003c <toofar_negative\+0x20003a>
++ 200010: f0 1f 00 0c mcall 200040 <toofar_negative\+0x20003e>
++ \.\.\.
++ 200030: ee b0 ff ff rcall 40002e <far_positive>
++ \.\.\.
++ 200034: R_AVR32_32_CPENT \.text\+0x2
++ 200038: R_AVR32_32_CPENT \.text\.init
++ 20003c: R_AVR32_32_CPENT undefined
++ 200040: R_AVR32_32_CPENT \.text\+0x40002c
++
++0040002c <toofar_positive>:
++ 40002c: d7 03 nop
++0040002e <far_positive>:
++ 40002e: d7 03 nop
++Disassembly of section \.text\.init:
++
++00000000 <different_section>:
++ 0: e2 c0 00 00 sub r0,r1,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/call_nopic_linkrelax.d
+@@ -0,0 +1,43 @@
++#source: call.s
++#as: --linkrelax
++#objdump: -dr
++#name: call_nopic_linkrelax
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <call_test>:
++ 0: d7 03 nop
++
++00000002 <toofar_negative>:
++ \.\.\.
++ 1ffffe: 00 00 add r0,r0
++ 200000: e0 a0 00 00 rcall 200000 <toofar_negative\+0x1ffffe>
++ 200000: R_AVR32_22H_PCREL \.text
++ 200004: f0 1f 00 00 mcall 200004 <toofar_negative\+0x200002>
++ 200004: R_AVR32_CPCALL \.text\+0x200034
++ 200008: f0 1f 00 00 mcall 200008 <toofar_negative\+0x200006>
++ 200008: R_AVR32_CPCALL \.text\+0x200038
++ 20000c: f0 1f 00 00 mcall 20000c <toofar_negative\+0x20000a>
++ 20000c: R_AVR32_CPCALL \.text\+0x20003c
++ 200010: f0 1f 00 00 mcall 200010 <toofar_negative\+0x20000e>
++ 200010: R_AVR32_CPCALL \.text\+0x200040
++ \.\.\.
++ 200030: e0 a0 00 00 rcall 200030 <toofar_negative\+0x20002e>
++ 200030: R_AVR32_22H_PCREL \.text\+0x40002e
++ \.\.\.
++ 200034: R_AVR32_ALIGN \*ABS\*\+0x2
++ 200034: R_AVR32_32_CPENT \.text\+0x2
++ 200038: R_AVR32_32_CPENT \.text\.init
++ 20003c: R_AVR32_32_CPENT undefined
++ 200040: R_AVR32_32_CPENT \.text\+0x40002c
++
++0040002c <toofar_positive>:
++ 40002c: d7 03 nop
++0040002e <far_positive>:
++ 40002e: d7 03 nop
++Disassembly of section \.text\.init:
++
++00000000 <different_section>:
++ 0: e2 c0 00 00 sub r0,r1,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/call_pic.d
+@@ -0,0 +1,36 @@
++#source: call.s
++#as: --pic
++#objdump: -dr
++#name: call_pic
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <call_test>:
++ 0: d7 03 nop
++
++00000002 <toofar_negative>:
++ \.\.\.
++ 1ffffe: 00 00 add r0,r0
++ 200000: f0 a0 00 00 rcall 0 <call_test>
++ 200004: f0 16 00 00 mcall r6\[0\]
++ 200004: R_AVR32_GOT18SW toofar_negative
++ 200008: f0 16 00 00 mcall r6\[0\]
++ 200008: R_AVR32_GOT18SW different_section
++ 20000c: f0 16 00 00 mcall r6\[0\]
++ 20000c: R_AVR32_GOT18SW undefined
++ 200010: f0 16 00 00 mcall r6\[0\]
++ 200010: R_AVR32_GOT18SW toofar_positive
++ \.\.\.
++ 200030: ee b0 ff ff rcall 40002e <far_positive>
++ \.\.\.
++
++0040002c <toofar_positive>:
++ 40002c: d7 03 nop
++0040002e <far_positive>:
++ 40002e: d7 03 nop
++Disassembly of section \.text\.init:
++
++00000000 <different_section>:
++ 0: e2 c0 00 00 sub r0,r1,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/call_pic_linkrelax.d
+@@ -0,0 +1,47 @@
++#source: call.s
++#as: --pic --linkrelax
++#objdump: -dr
++#name: call_pic_linkrelax
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <call_test>:
++ 0: d7 03 nop
++
++00000002 <toofar_negative>:
++ \.\.\.
++ 1ffffe: 00 00 add r0,r0
++ 200000: e0 a0 00 00 rcall 200000 <toofar_negative\+0x1ffffe>
++ 200000: R_AVR32_22H_PCREL \.text
++ 200004: e0 6e 00 00 mov lr,0
++ 200004: R_AVR32_GOTCALL toofar_negative
++ 200008: ec 0e 03 2e ld\.w lr,r6\[lr<<0x2\]
++ 20000c: 5d 1e icall lr
++ 20000e: e0 6e 00 00 mov lr,0
++ 20000e: R_AVR32_GOTCALL different_section
++ 200012: ec 0e 03 2e ld\.w lr,r6\[lr<<0x2\]
++ 200016: 5d 1e icall lr
++ 200018: e0 6e 00 00 mov lr,0
++ 200018: R_AVR32_GOTCALL undefined
++ 20001c: ec 0e 03 2e ld\.w lr,r6\[lr<<0x2\]
++ 200020: 5d 1e icall lr
++ 200022: e0 6e 00 00 mov lr,0
++ 200022: R_AVR32_GOTCALL toofar_positive
++ 200026: ec 0e 03 2e ld\.w lr,r6\[lr<<0x2\]
++ 20002a: 5d 1e icall lr
++ 20002c: 00 00 add r0,r0
++ 20002e: 00 00 add r0,r0
++ 200030: e0 a0 00 00 rcall 200030 <toofar_negative\+0x20002e>
++ 200030: R_AVR32_22H_PCREL \.text\+0x40002e
++ \.\.\.
++
++0040002c <toofar_positive>:
++ 40002c: d7 03 nop
++0040002e <far_positive>:
++ 40002e: d7 03 nop
++Disassembly of section \.text\.init:
++
++00000000 <different_section>:
++ 0: e2 c0 00 00 sub r0,r1,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/call.s
+@@ -0,0 +1,30 @@
++
++ .text
++ .global call_test
++call_test:
++far_negative:
++ nop
++toofar_negative:
++
++ .org 0x200000
++
++ call far_negative
++ call toofar_negative
++ call different_section
++ call undefined
++ call toofar_positive
++ .org 0x200030
++ call far_positive
++
++ .cpool
++
++ .org 0x40002c
++
++toofar_positive:
++ nop
++far_positive:
++ nop
++
++ .section .text.init,"ax",@progbits
++different_section:
++ sub r0, r1, 0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/dwarf2.d
+@@ -0,0 +1,42 @@
++#readelf: -wl
++#name: dwarf2
++#source: dwarf2.s
++
++Dump of debug contents of section \.debug_line:
++
++ Length: 53
++ DWARF Version: 2
++ Prologue Length: 26
++ Minimum Instruction Length: 1
++ Initial value of 'is_stmt': 1
++ Line Base: -5
++ Line Range: 14
++ Opcode Base: 10
++ \(Pointer size: 4\)
++
++ Opcodes:
++ Opcode 1 has 0 args
++ Opcode 2 has 1 args
++ Opcode 3 has 1 args
++ Opcode 4 has 1 args
++ Opcode 5 has 1 args
++ Opcode 6 has 0 args
++ Opcode 7 has 0 args
++ Opcode 8 has 0 args
++ Opcode 9 has 1 args
++
++ The Directory Table is empty\.
++
++ The File Name Table:
++ Entry Dir Time Size Name
++ 1 0 0 0 main\.c
++
++ Line Number Statements:
++ Extended opcode 2: set Address to 0x0
++ Advance Line by 87 to 88
++ Copy
++ Advance Line by 23 to 111
++ Special opcode .*: advance Address by 4 to 0x4 and Line by 0 to 111
++ Special opcode .*: advance Address by 10 to 0xe and Line by 1 to 112
++ Advance PC by 530 to 220
++ Extended opcode 1: End of Sequence
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/dwarf2.s
+@@ -0,0 +1,67 @@
++# Source file used to test DWARF2 information for AVR32.
++
++ .file "main.c"
++
++ .section .debug_abbrev,"",@progbits
++.Ldebug_abbrev0:
++ .section .debug_info,"",@progbits
++.Ldebug_info0:
++ .section .debug_line,"",@progbits
++.Ldebug_line0:
++
++ .text
++ .align 1
++ .globl main
++ .type main, @function
++.Ltext0:
++main:
++ .file 1 "main.c"
++ .loc 1 88 0
++ pushm r0-r7,lr
++ sub sp, 4
++ .loc 1 111 0
++ lddpc r12, .LC1
++ lddpc r7, .LC1
++ icall r7
++ .loc 1 112 0
++ lddpc r6, .LC4
++
++ .align 2
++.LC4: .int 0
++
++ .fill 256, 2, 0
++
++ .align 2
++.LC1:
++ .int 0
++.LC2:
++ .int 0
++.LC3:
++ .int 0
++ .size main, . - main
++
++.Letext0:
++
++ .section .debug_info
++ .int .Ledebug_info0 - .Ldebug_info0 // size
++ .short 2 // version
++ .int .Ldebug_abbrev0 // abbrev offset
++ .byte 4 // bytes per addr
++
++ .uleb128 1 // abbrev 1
++ .int .Ldebug_line0 // DW_AT_stmt_list
++ .int .Letext0 // DW_AT_high_pc
++ .int .Ltext0 // DW_AT_low_pc
++
++.Ledebug_info0:
++
++ .section .debug_abbrev
++ .uleb128 0x01
++ .uleb128 0x11 // DW_TAG_compile_unit
++ .byte 0 // DW_CHILDREN_no
++ .uleb128 0x10, 0x6 // DW_AT_stmt_list
++ .uleb128 0x12, 0x1 // DW_AT_high_pc
++ .uleb128 0x11, 0x1 // DW_AT_low_pc
++ .uleb128 0, 0
++
++ .byte 0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/fpinsn.d
+@@ -0,0 +1,271 @@
++#as:
++#objdump: -dr
++#name: fpinsn
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++[0-9a-f]* <fadd_s>:
++ *[0-9a-f]*: e1 a2 0f ff cop cp0,cr15,cr15,cr15,0x4
++ *[0-9a-f]*: e1 a2 00 00 cop cp0,cr0,cr0,cr0,0x4
++ *[0-9a-f]*: e1 a2 00 ff cop cp0,cr0,cr15,cr15,0x4
++ *[0-9a-f]*: e1 a2 0f 0f cop cp0,cr15,cr0,cr15,0x4
++ *[0-9a-f]*: e1 a2 0f f0 cop cp0,cr15,cr15,cr0,0x4
++ *[0-9a-f]*: e1 a2 07 88 cop cp0,cr7,cr8,cr8,0x4
++ *[0-9a-f]*: e1 a2 08 78 cop cp0,cr8,cr7,cr8,0x4
++ *[0-9a-f]*: e1 a2 08 87 cop cp0,cr8,cr8,cr7,0x4
++
++[0-9a-f]* <fsub_s>:
++ *[0-9a-f]*: e1 a2 1f ff cop cp0,cr15,cr15,cr15,0x5
++ *[0-9a-f]*: e1 a2 10 00 cop cp0,cr0,cr0,cr0,0x5
++ *[0-9a-f]*: e1 a2 10 ff cop cp0,cr0,cr15,cr15,0x5
++ *[0-9a-f]*: e1 a2 1f 0f cop cp0,cr15,cr0,cr15,0x5
++ *[0-9a-f]*: e1 a2 1f f0 cop cp0,cr15,cr15,cr0,0x5
++ *[0-9a-f]*: e1 a2 17 88 cop cp0,cr7,cr8,cr8,0x5
++ *[0-9a-f]*: e1 a2 18 78 cop cp0,cr8,cr7,cr8,0x5
++ *[0-9a-f]*: e1 a2 18 87 cop cp0,cr8,cr8,cr7,0x5
++
++[0-9a-f]* <fmac_s>:
++ *[0-9a-f]*: e1 a0 0f ff cop cp0,cr15,cr15,cr15,0x0
++ *[0-9a-f]*: e1 a0 00 00 cop cp0,cr0,cr0,cr0,0x0
++ *[0-9a-f]*: e1 a0 00 ff cop cp0,cr0,cr15,cr15,0x0
++ *[0-9a-f]*: e1 a0 0f 0f cop cp0,cr15,cr0,cr15,0x0
++ *[0-9a-f]*: e1 a0 0f f0 cop cp0,cr15,cr15,cr0,0x0
++ *[0-9a-f]*: e1 a0 07 88 cop cp0,cr7,cr8,cr8,0x0
++ *[0-9a-f]*: e1 a0 08 78 cop cp0,cr8,cr7,cr8,0x0
++ *[0-9a-f]*: e1 a0 08 87 cop cp0,cr8,cr8,cr7,0x0
++
++[0-9a-f]* <fnmac_s>:
++ *[0-9a-f]*: e1 a0 1f ff cop cp0,cr15,cr15,cr15,0x1
++ *[0-9a-f]*: e1 a0 10 00 cop cp0,cr0,cr0,cr0,0x1
++ *[0-9a-f]*: e1 a0 10 ff cop cp0,cr0,cr15,cr15,0x1
++ *[0-9a-f]*: e1 a0 1f 0f cop cp0,cr15,cr0,cr15,0x1
++ *[0-9a-f]*: e1 a0 1f f0 cop cp0,cr15,cr15,cr0,0x1
++ *[0-9a-f]*: e1 a0 17 88 cop cp0,cr7,cr8,cr8,0x1
++ *[0-9a-f]*: e1 a0 18 78 cop cp0,cr8,cr7,cr8,0x1
++ *[0-9a-f]*: e1 a0 18 87 cop cp0,cr8,cr8,cr7,0x1
++
++[0-9a-f]* <fmsc_s>:
++ *[0-9a-f]*: e1 a1 0f ff cop cp0,cr15,cr15,cr15,0x2
++ *[0-9a-f]*: e1 a1 00 00 cop cp0,cr0,cr0,cr0,0x2
++ *[0-9a-f]*: e1 a1 00 ff cop cp0,cr0,cr15,cr15,0x2
++ *[0-9a-f]*: e1 a1 0f 0f cop cp0,cr15,cr0,cr15,0x2
++ *[0-9a-f]*: e1 a1 0f f0 cop cp0,cr15,cr15,cr0,0x2
++ *[0-9a-f]*: e1 a1 07 88 cop cp0,cr7,cr8,cr8,0x2
++ *[0-9a-f]*: e1 a1 08 78 cop cp0,cr8,cr7,cr8,0x2
++ *[0-9a-f]*: e1 a1 08 87 cop cp0,cr8,cr8,cr7,0x2
++
++[0-9a-f]* <fnmsc_s>:
++ *[0-9a-f]*: e1 a1 1f ff cop cp0,cr15,cr15,cr15,0x3
++ *[0-9a-f]*: e1 a1 10 00 cop cp0,cr0,cr0,cr0,0x3
++ *[0-9a-f]*: e1 a1 10 ff cop cp0,cr0,cr15,cr15,0x3
++ *[0-9a-f]*: e1 a1 1f 0f cop cp0,cr15,cr0,cr15,0x3
++ *[0-9a-f]*: e1 a1 1f f0 cop cp0,cr15,cr15,cr0,0x3
++ *[0-9a-f]*: e1 a1 17 88 cop cp0,cr7,cr8,cr8,0x3
++ *[0-9a-f]*: e1 a1 18 78 cop cp0,cr8,cr7,cr8,0x3
++ *[0-9a-f]*: e1 a1 18 87 cop cp0,cr8,cr8,cr7,0x3
++
++[0-9a-f]* <fmul_s>:
++ *[0-9a-f]*: e1 a3 0f ff cop cp0,cr15,cr15,cr15,0x6
++ *[0-9a-f]*: e1 a3 00 00 cop cp0,cr0,cr0,cr0,0x6
++ *[0-9a-f]*: e1 a3 00 ff cop cp0,cr0,cr15,cr15,0x6
++ *[0-9a-f]*: e1 a3 0f 0f cop cp0,cr15,cr0,cr15,0x6
++ *[0-9a-f]*: e1 a3 0f f0 cop cp0,cr15,cr15,cr0,0x6
++ *[0-9a-f]*: e1 a3 07 88 cop cp0,cr7,cr8,cr8,0x6
++ *[0-9a-f]*: e1 a3 08 78 cop cp0,cr8,cr7,cr8,0x6
++ *[0-9a-f]*: e1 a3 08 87 cop cp0,cr8,cr8,cr7,0x6
++
++[0-9a-f]* <fnmul_s>:
++ *[0-9a-f]*: e1 a3 1f ff cop cp0,cr15,cr15,cr15,0x7
++ *[0-9a-f]*: e1 a3 10 00 cop cp0,cr0,cr0,cr0,0x7
++ *[0-9a-f]*: e1 a3 10 ff cop cp0,cr0,cr15,cr15,0x7
++ *[0-9a-f]*: e1 a3 1f 0f cop cp0,cr15,cr0,cr15,0x7
++ *[0-9a-f]*: e1 a3 1f f0 cop cp0,cr15,cr15,cr0,0x7
++ *[0-9a-f]*: e1 a3 17 88 cop cp0,cr7,cr8,cr8,0x7
++ *[0-9a-f]*: e1 a3 18 78 cop cp0,cr8,cr7,cr8,0x7
++ *[0-9a-f]*: e1 a3 18 87 cop cp0,cr8,cr8,cr7,0x7
++
++[0-9a-f]* <fneg_s>:
++ *[0-9a-f]*: e1 a4 0f f0 cop cp0,cr15,cr15,cr0,0x8
++ *[0-9a-f]*: e1 a4 00 00 cop cp0,cr0,cr0,cr0,0x8
++ *[0-9a-f]*: e1 a4 00 f0 cop cp0,cr0,cr15,cr0,0x8
++ *[0-9a-f]*: e1 a4 0f 00 cop cp0,cr15,cr0,cr0,0x8
++ *[0-9a-f]*: e1 a4 07 80 cop cp0,cr7,cr8,cr0,0x8
++ *[0-9a-f]*: e1 a4 08 70 cop cp0,cr8,cr7,cr0,0x8
++
++[0-9a-f]* <fabs_s>:
++ *[0-9a-f]*: e1 a4 1f f0 cop cp0,cr15,cr15,cr0,0x9
++ *[0-9a-f]*: e1 a4 10 00 cop cp0,cr0,cr0,cr0,0x9
++ *[0-9a-f]*: e1 a4 10 f0 cop cp0,cr0,cr15,cr0,0x9
++ *[0-9a-f]*: e1 a4 1f 00 cop cp0,cr15,cr0,cr0,0x9
++ *[0-9a-f]*: e1 a4 17 80 cop cp0,cr7,cr8,cr0,0x9
++ *[0-9a-f]*: e1 a4 18 70 cop cp0,cr8,cr7,cr0,0x9
++
++[0-9a-f]* <fcmp_s>:
++ *[0-9a-f]*: e1 a6 10 ff cop cp0,cr0,cr15,cr15,0xd
++ *[0-9a-f]*: e1 a6 10 00 cop cp0,cr0,cr0,cr0,0xd
++ *[0-9a-f]*: e1 a6 10 0f cop cp0,cr0,cr0,cr15,0xd
++ *[0-9a-f]*: e1 a6 10 f0 cop cp0,cr0,cr15,cr0,0xd
++ *[0-9a-f]*: e1 a6 10 78 cop cp0,cr0,cr7,cr8,0xd
++ *[0-9a-f]*: e1 a6 10 87 cop cp0,cr0,cr8,cr7,0xd
++
++[0-9a-f]* <fadd_d>:
++ *[0-9a-f]*: e5 a2 0e ee cop cp0,cr14,cr14,cr14,0x44
++ *[0-9a-f]*: e5 a2 00 00 cop cp0,cr0,cr0,cr0,0x44
++ *[0-9a-f]*: e5 a2 00 ee cop cp0,cr0,cr14,cr14,0x44
++ *[0-9a-f]*: e5 a2 0e 0e cop cp0,cr14,cr0,cr14,0x44
++ *[0-9a-f]*: e5 a2 0e e0 cop cp0,cr14,cr14,cr0,0x44
++ *[0-9a-f]*: e5 a2 06 88 cop cp0,cr6,cr8,cr8,0x44
++ *[0-9a-f]*: e5 a2 08 68 cop cp0,cr8,cr6,cr8,0x44
++ *[0-9a-f]*: e5 a2 08 86 cop cp0,cr8,cr8,cr6,0x44
++
++[0-9a-f]* <fsub_d>:
++ *[0-9a-f]*: e5 a2 1e ee cop cp0,cr14,cr14,cr14,0x45
++ *[0-9a-f]*: e5 a2 10 00 cop cp0,cr0,cr0,cr0,0x45
++ *[0-9a-f]*: e5 a2 10 ee cop cp0,cr0,cr14,cr14,0x45
++ *[0-9a-f]*: e5 a2 1e 0e cop cp0,cr14,cr0,cr14,0x45
++ *[0-9a-f]*: e5 a2 1e e0 cop cp0,cr14,cr14,cr0,0x45
++ *[0-9a-f]*: e5 a2 16 88 cop cp0,cr6,cr8,cr8,0x45
++ *[0-9a-f]*: e5 a2 18 68 cop cp0,cr8,cr6,cr8,0x45
++ *[0-9a-f]*: e5 a2 18 86 cop cp0,cr8,cr8,cr6,0x45
++
++[0-9a-f]* <fmac_d>:
++ *[0-9a-f]*: e5 a0 0e ee cop cp0,cr14,cr14,cr14,0x40
++ *[0-9a-f]*: e5 a0 00 00 cop cp0,cr0,cr0,cr0,0x40
++ *[0-9a-f]*: e5 a0 00 ee cop cp0,cr0,cr14,cr14,0x40
++ *[0-9a-f]*: e5 a0 0e 0e cop cp0,cr14,cr0,cr14,0x40
++ *[0-9a-f]*: e5 a0 0e e0 cop cp0,cr14,cr14,cr0,0x40
++ *[0-9a-f]*: e5 a0 06 88 cop cp0,cr6,cr8,cr8,0x40
++ *[0-9a-f]*: e5 a0 08 68 cop cp0,cr8,cr6,cr8,0x40
++ *[0-9a-f]*: e5 a0 08 86 cop cp0,cr8,cr8,cr6,0x40
++
++[0-9a-f]* <fnmac_d>:
++ *[0-9a-f]*: e5 a0 1e ee cop cp0,cr14,cr14,cr14,0x41
++ *[0-9a-f]*: e5 a0 10 00 cop cp0,cr0,cr0,cr0,0x41
++ *[0-9a-f]*: e5 a0 10 ee cop cp0,cr0,cr14,cr14,0x41
++ *[0-9a-f]*: e5 a0 1e 0e cop cp0,cr14,cr0,cr14,0x41
++ *[0-9a-f]*: e5 a0 1e e0 cop cp0,cr14,cr14,cr0,0x41
++ *[0-9a-f]*: e5 a0 16 88 cop cp0,cr6,cr8,cr8,0x41
++ *[0-9a-f]*: e5 a0 18 68 cop cp0,cr8,cr6,cr8,0x41
++ *[0-9a-f]*: e5 a0 18 86 cop cp0,cr8,cr8,cr6,0x41
++
++[0-9a-f]* <fmsc_d>:
++ *[0-9a-f]*: e5 a1 0e ee cop cp0,cr14,cr14,cr14,0x42
++ *[0-9a-f]*: e5 a1 00 00 cop cp0,cr0,cr0,cr0,0x42
++ *[0-9a-f]*: e5 a1 00 ee cop cp0,cr0,cr14,cr14,0x42
++ *[0-9a-f]*: e5 a1 0e 0e cop cp0,cr14,cr0,cr14,0x42
++ *[0-9a-f]*: e5 a1 0e e0 cop cp0,cr14,cr14,cr0,0x42
++ *[0-9a-f]*: e5 a1 06 88 cop cp0,cr6,cr8,cr8,0x42
++ *[0-9a-f]*: e5 a1 08 68 cop cp0,cr8,cr6,cr8,0x42
++ *[0-9a-f]*: e5 a1 08 86 cop cp0,cr8,cr8,cr6,0x42
++
++[0-9a-f]* <fnmsc_d>:
++ *[0-9a-f]*: e5 a1 1e ee cop cp0,cr14,cr14,cr14,0x43
++ *[0-9a-f]*: e5 a1 10 00 cop cp0,cr0,cr0,cr0,0x43
++ *[0-9a-f]*: e5 a1 10 ee cop cp0,cr0,cr14,cr14,0x43
++ *[0-9a-f]*: e5 a1 1e 0e cop cp0,cr14,cr0,cr14,0x43
++ *[0-9a-f]*: e5 a1 1e e0 cop cp0,cr14,cr14,cr0,0x43
++ *[0-9a-f]*: e5 a1 16 88 cop cp0,cr6,cr8,cr8,0x43
++ *[0-9a-f]*: e5 a1 18 68 cop cp0,cr8,cr6,cr8,0x43
++ *[0-9a-f]*: e5 a1 18 86 cop cp0,cr8,cr8,cr6,0x43
++
++[0-9a-f]* <fmul_d>:
++ *[0-9a-f]*: e5 a3 0e ee cop cp0,cr14,cr14,cr14,0x46
++ *[0-9a-f]*: e5 a3 00 00 cop cp0,cr0,cr0,cr0,0x46
++ *[0-9a-f]*: e5 a3 00 ee cop cp0,cr0,cr14,cr14,0x46
++ *[0-9a-f]*: e5 a3 0e 0e cop cp0,cr14,cr0,cr14,0x46
++ *[0-9a-f]*: e5 a3 0e e0 cop cp0,cr14,cr14,cr0,0x46
++ *[0-9a-f]*: e5 a3 06 88 cop cp0,cr6,cr8,cr8,0x46
++ *[0-9a-f]*: e5 a3 08 68 cop cp0,cr8,cr6,cr8,0x46
++ *[0-9a-f]*: e5 a3 08 86 cop cp0,cr8,cr8,cr6,0x46
++
++[0-9a-f]* <fnmul_d>:
++ *[0-9a-f]*: e5 a3 1e ee cop cp0,cr14,cr14,cr14,0x47
++ *[0-9a-f]*: e5 a3 10 00 cop cp0,cr0,cr0,cr0,0x47
++ *[0-9a-f]*: e5 a3 10 ee cop cp0,cr0,cr14,cr14,0x47
++ *[0-9a-f]*: e5 a3 1e 0e cop cp0,cr14,cr0,cr14,0x47
++ *[0-9a-f]*: e5 a3 1e e0 cop cp0,cr14,cr14,cr0,0x47
++ *[0-9a-f]*: e5 a3 16 88 cop cp0,cr6,cr8,cr8,0x47
++ *[0-9a-f]*: e5 a3 18 68 cop cp0,cr8,cr6,cr8,0x47
++ *[0-9a-f]*: e5 a3 18 86 cop cp0,cr8,cr8,cr6,0x47
++
++[0-9a-f]* <fneg_d>:
++ *[0-9a-f]*: e5 a4 0e e0 cop cp0,cr14,cr14,cr0,0x48
++ *[0-9a-f]*: e5 a4 00 00 cop cp0,cr0,cr0,cr0,0x48
++ *[0-9a-f]*: e5 a4 00 e0 cop cp0,cr0,cr14,cr0,0x48
++ *[0-9a-f]*: e5 a4 0e 00 cop cp0,cr14,cr0,cr0,0x48
++ *[0-9a-f]*: e5 a4 06 80 cop cp0,cr6,cr8,cr0,0x48
++ *[0-9a-f]*: e5 a4 08 60 cop cp0,cr8,cr6,cr0,0x48
++
++[0-9a-f]* <fabs_d>:
++ *[0-9a-f]*: e5 a4 1e e0 cop cp0,cr14,cr14,cr0,0x49
++ *[0-9a-f]*: e5 a4 10 00 cop cp0,cr0,cr0,cr0,0x49
++ *[0-9a-f]*: e5 a4 10 e0 cop cp0,cr0,cr14,cr0,0x49
++ *[0-9a-f]*: e5 a4 1e 00 cop cp0,cr14,cr0,cr0,0x49
++ *[0-9a-f]*: e5 a4 16 80 cop cp0,cr6,cr8,cr0,0x49
++ *[0-9a-f]*: e5 a4 18 60 cop cp0,cr8,cr6,cr0,0x49
++
++[0-9a-f]* <fcmp_d>:
++ *[0-9a-f]*: e5 a6 10 ee cop cp0,cr0,cr14,cr14,0x4d
++ *[0-9a-f]*: e5 a6 10 00 cop cp0,cr0,cr0,cr0,0x4d
++ *[0-9a-f]*: e5 a6 10 0e cop cp0,cr0,cr0,cr14,0x4d
++ *[0-9a-f]*: e5 a6 10 e0 cop cp0,cr0,cr14,cr0,0x4d
++ *[0-9a-f]*: e5 a6 10 68 cop cp0,cr0,cr6,cr8,0x4d
++ *[0-9a-f]*: e5 a6 10 86 cop cp0,cr0,cr8,cr6,0x4d
++
++[0-9a-f]* <fmov_s>:
++ *[0-9a-f]*: e1 a5 0f f0 cop cp0,cr15,cr15,cr0,0xa
++ *[0-9a-f]*: e1 a5 00 00 cop cp0,cr0,cr0,cr0,0xa
++ *[0-9a-f]*: e1 a5 0f 00 cop cp0,cr15,cr0,cr0,0xa
++ *[0-9a-f]*: e1 a5 00 f0 cop cp0,cr0,cr15,cr0,0xa
++ *[0-9a-f]*: e1 a5 08 70 cop cp0,cr8,cr7,cr0,0xa
++ *[0-9a-f]*: e1 a5 07 80 cop cp0,cr7,cr8,cr0,0xa
++ *[0-9a-f]*: ef af 0f 00 mvcr.w cp0,pc,cr15
++ *[0-9a-f]*: ef a0 00 00 mvcr.w cp0,r0,cr0
++ *[0-9a-f]*: ef af 00 00 mvcr.w cp0,pc,cr0
++ *[0-9a-f]*: ef a0 0f 00 mvcr.w cp0,r0,cr15
++ *[0-9a-f]*: ef a8 07 00 mvcr.w cp0,r8,cr7
++ *[0-9a-f]*: ef a7 08 00 mvcr.w cp0,r7,cr8
++ *[0-9a-f]*: ef af 0f 20 mvrc.w cp0,cr15,pc
++ *[0-9a-f]*: ef a0 00 20 mvrc.w cp0,cr0,r0
++ *[0-9a-f]*: ef a0 0f 20 mvrc.w cp0,cr15,r0
++ *[0-9a-f]*: ef af 00 20 mvrc.w cp0,cr0,pc
++ *[0-9a-f]*: ef a7 08 20 mvrc.w cp0,cr8,r7
++ *[0-9a-f]*: ef a8 07 20 mvrc.w cp0,cr7,r8
++
++[0-9a-f]* <fmov_d>:
++ *[0-9a-f]*: e5 a5 0e e0 cop cp0,cr14,cr14,cr0,0x4a
++ *[0-9a-f]*: e5 a5 00 00 cop cp0,cr0,cr0,cr0,0x4a
++ *[0-9a-f]*: e5 a5 0e 00 cop cp0,cr14,cr0,cr0,0x4a
++ *[0-9a-f]*: e5 a5 00 e0 cop cp0,cr0,cr14,cr0,0x4a
++ *[0-9a-f]*: e5 a5 08 60 cop cp0,cr8,cr6,cr0,0x4a
++ *[0-9a-f]*: e5 a5 06 80 cop cp0,cr6,cr8,cr0,0x4a
++ *[0-9a-f]*: ef ae 0e 10 mvcr.d cp0,lr,cr14
++ *[0-9a-f]*: ef a0 00 10 mvcr.d cp0,r0,cr0
++ *[0-9a-f]*: ef ae 00 10 mvcr.d cp0,lr,cr0
++ *[0-9a-f]*: ef a0 0e 10 mvcr.d cp0,r0,cr14
++ *[0-9a-f]*: ef a8 06 10 mvcr.d cp0,r8,cr6
++ *[0-9a-f]*: ef a6 08 10 mvcr.d cp0,r6,cr8
++ *[0-9a-f]*: ef ae 0e 30 mvrc.d cp0,cr14,lr
++ *[0-9a-f]*: ef a0 00 30 mvrc.d cp0,cr0,r0
++ *[0-9a-f]*: ef a0 0e 30 mvrc.d cp0,cr14,r0
++ *[0-9a-f]*: ef ae 00 30 mvrc.d cp0,cr0,lr
++ *[0-9a-f]*: ef a6 08 30 mvrc.d cp0,cr8,r6
++ *[0-9a-f]*: ef a8 06 30 mvrc.d cp0,cr6,r8
++
++[0-9a-f]* <fcasts_d>:
++ *[0-9a-f]*: e1 a7 1f e0 cop cp0,cr15,cr14,cr0,0xf
++ *[0-9a-f]*: e1 a7 10 00 cop cp0,cr0,cr0,cr0,0xf
++ *[0-9a-f]*: e1 a7 1f 00 cop cp0,cr15,cr0,cr0,0xf
++ *[0-9a-f]*: e1 a7 10 e0 cop cp0,cr0,cr14,cr0,0xf
++ *[0-9a-f]*: e1 a7 18 60 cop cp0,cr8,cr6,cr0,0xf
++ *[0-9a-f]*: e1 a7 17 80 cop cp0,cr7,cr8,cr0,0xf
++
++[0-9a-f]* <fcastd_s>:
++ *[0-9a-f]*: e1 a8 0e f0 cop cp0,cr14,cr15,cr0,0x10
++ *[0-9a-f]*: e1 a8 00 00 cop cp0,cr0,cr0,cr0,0x10
++ *[0-9a-f]*: e1 a8 0e 00 cop cp0,cr14,cr0,cr0,0x10
++ *[0-9a-f]*: e1 a8 00 f0 cop cp0,cr0,cr15,cr0,0x10
++ *[0-9a-f]*: e1 a8 08 70 cop cp0,cr8,cr7,cr0,0x10
++ *[0-9a-f]*: e1 a8 06 80 cop cp0,cr6,cr8,cr0,0x10
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/fpinsn.s
+@@ -0,0 +1,266 @@
++
++ .text
++ .global fadd_s
++fadd_s:
++ fadd.s fr15, fr15, fr15
++ fadd.s fr0, fr0, fr0
++ fadd.s fr0, fr15, fr15
++ fadd.s fr15, fr0, fr15
++ fadd.s fr15, fr15, fr0
++ fadd.s fr7, fr8, fr8
++ fadd.s fr8, fr7, fr8
++ fadd.s fr8, fr8, fr7
++ .global fsub_s
++fsub_s:
++ fsub.s fr15, fr15, fr15
++ fsub.s fr0, fr0, fr0
++ fsub.s fr0, fr15, fr15
++ fsub.s fr15, fr0, fr15
++ fsub.s fr15, fr15, fr0
++ fsub.s fr7, fr8, fr8
++ fsub.s fr8, fr7, fr8
++ fsub.s fr8, fr8, fr7
++ .global fmac_s
++fmac_s:
++ fmac.s fr15, fr15, fr15
++ fmac.s fr0, fr0, fr0
++ fmac.s fr0, fr15, fr15
++ fmac.s fr15, fr0, fr15
++ fmac.s fr15, fr15, fr0
++ fmac.s fr7, fr8, fr8
++ fmac.s fr8, fr7, fr8
++ fmac.s fr8, fr8, fr7
++ .global fnmac_s
++fnmac_s:
++ fnmac.s fr15, fr15, fr15
++ fnmac.s fr0, fr0, fr0
++ fnmac.s fr0, fr15, fr15
++ fnmac.s fr15, fr0, fr15
++ fnmac.s fr15, fr15, fr0
++ fnmac.s fr7, fr8, fr8
++ fnmac.s fr8, fr7, fr8
++ fnmac.s fr8, fr8, fr7
++ .global fmsc_s
++fmsc_s:
++ fmsc.s fr15, fr15, fr15
++ fmsc.s fr0, fr0, fr0
++ fmsc.s fr0, fr15, fr15
++ fmsc.s fr15, fr0, fr15
++ fmsc.s fr15, fr15, fr0
++ fmsc.s fr7, fr8, fr8
++ fmsc.s fr8, fr7, fr8
++ fmsc.s fr8, fr8, fr7
++ .global fnmsc_s
++fnmsc_s:
++ fnmsc.s fr15, fr15, fr15
++ fnmsc.s fr0, fr0, fr0
++ fnmsc.s fr0, fr15, fr15
++ fnmsc.s fr15, fr0, fr15
++ fnmsc.s fr15, fr15, fr0
++ fnmsc.s fr7, fr8, fr8
++ fnmsc.s fr8, fr7, fr8
++ fnmsc.s fr8, fr8, fr7
++ .global fmul_s
++fmul_s:
++ fmul.s fr15, fr15, fr15
++ fmul.s fr0, fr0, fr0
++ fmul.s fr0, fr15, fr15
++ fmul.s fr15, fr0, fr15
++ fmul.s fr15, fr15, fr0
++ fmul.s fr7, fr8, fr8
++ fmul.s fr8, fr7, fr8
++ fmul.s fr8, fr8, fr7
++ .global fnmul_s
++fnmul_s:
++ fnmul.s fr15, fr15, fr15
++ fnmul.s fr0, fr0, fr0
++ fnmul.s fr0, fr15, fr15
++ fnmul.s fr15, fr0, fr15
++ fnmul.s fr15, fr15, fr0
++ fnmul.s fr7, fr8, fr8
++ fnmul.s fr8, fr7, fr8
++ fnmul.s fr8, fr8, fr7
++ .global fneg_s
++fneg_s:
++ fneg.s fr15, fr15
++ fneg.s fr0, fr0
++ fneg.s fr0, fr15
++ fneg.s fr15, fr0
++ fneg.s fr7, fr8
++ fneg.s fr8, fr7
++ .global fabs_s
++fabs_s:
++ fabs.s fr15, fr15
++ fabs.s fr0, fr0
++ fabs.s fr0, fr15
++ fabs.s fr15, fr0
++ fabs.s fr7, fr8
++ fabs.s fr8, fr7
++ .global fcmp_s
++fcmp_s:
++ fcmp.s fr15, fr15
++ fcmp.s fr0, fr0
++ fcmp.s fr0, fr15
++ fcmp.s fr15, fr0
++ fcmp.s fr7, fr8
++ fcmp.s fr8, fr7
++ .global fadd_d
++fadd_d:
++ fadd.d fr14, fr14, fr14
++ fadd.d fr0, fr0, fr0
++ fadd.d fr0, fr14, fr14
++ fadd.d fr14, fr0, fr14
++ fadd.d fr14, fr14, fr0
++ fadd.d fr6, fr8, fr8
++ fadd.d fr8, fr6, fr8
++ fadd.d fr8, fr8, fr6
++ .global fsub_d
++fsub_d:
++ fsub.d fr14, fr14, fr14
++ fsub.d fr0, fr0, fr0
++ fsub.d fr0, fr14, fr14
++ fsub.d fr14, fr0, fr14
++ fsub.d fr14, fr14, fr0
++ fsub.d fr6, fr8, fr8
++ fsub.d fr8, fr6, fr8
++ fsub.d fr8, fr8, fr6
++ .global fmac_d
++fmac_d:
++ fmac.d fr14, fr14, fr14
++ fmac.d fr0, fr0, fr0
++ fmac.d fr0, fr14, fr14
++ fmac.d fr14, fr0, fr14
++ fmac.d fr14, fr14, fr0
++ fmac.d fr6, fr8, fr8
++ fmac.d fr8, fr6, fr8
++ fmac.d fr8, fr8, fr6
++ .global fnmac_d
++fnmac_d:
++ fnmac.d fr14, fr14, fr14
++ fnmac.d fr0, fr0, fr0
++ fnmac.d fr0, fr14, fr14
++ fnmac.d fr14, fr0, fr14
++ fnmac.d fr14, fr14, fr0
++ fnmac.d fr6, fr8, fr8
++ fnmac.d fr8, fr6, fr8
++ fnmac.d fr8, fr8, fr6
++ .global fmsc_d
++fmsc_d:
++ fmsc.d fr14, fr14, fr14
++ fmsc.d fr0, fr0, fr0
++ fmsc.d fr0, fr14, fr14
++ fmsc.d fr14, fr0, fr14
++ fmsc.d fr14, fr14, fr0
++ fmsc.d fr6, fr8, fr8
++ fmsc.d fr8, fr6, fr8
++ fmsc.d fr8, fr8, fr6
++ .global fnmsc_d
++fnmsc_d:
++ fnmsc.d fr14, fr14, fr14
++ fnmsc.d fr0, fr0, fr0
++ fnmsc.d fr0, fr14, fr14
++ fnmsc.d fr14, fr0, fr14
++ fnmsc.d fr14, fr14, fr0
++ fnmsc.d fr6, fr8, fr8
++ fnmsc.d fr8, fr6, fr8
++ fnmsc.d fr8, fr8, fr6
++ .global fmul_d
++fmul_d:
++ fmul.d fr14, fr14, fr14
++ fmul.d fr0, fr0, fr0
++ fmul.d fr0, fr14, fr14
++ fmul.d fr14, fr0, fr14
++ fmul.d fr14, fr14, fr0
++ fmul.d fr6, fr8, fr8
++ fmul.d fr8, fr6, fr8
++ fmul.d fr8, fr8, fr6
++ .global fnmul_d
++fnmul_d:
++ fnmul.d fr14, fr14, fr14
++ fnmul.d fr0, fr0, fr0
++ fnmul.d fr0, fr14, fr14
++ fnmul.d fr14, fr0, fr14
++ fnmul.d fr14, fr14, fr0
++ fnmul.d fr6, fr8, fr8
++ fnmul.d fr8, fr6, fr8
++ fnmul.d fr8, fr8, fr6
++ .global fneg_d
++fneg_d:
++ fneg.d fr14, fr14
++ fneg.d fr0, fr0
++ fneg.d fr0, fr14
++ fneg.d fr14, fr0
++ fneg.d fr6, fr8
++ fneg.d fr8, fr6
++ .global fabs_d
++fabs_d:
++ fabs.d fr14, fr14
++ fabs.d fr0, fr0
++ fabs.d fr0, fr14
++ fabs.d fr14, fr0
++ fabs.d fr6, fr8
++ fabs.d fr8, fr6
++ .global fcmp_d
++fcmp_d:
++ fcmp.d fr14, fr14
++ fcmp.d fr0, fr0
++ fcmp.d fr0, fr14
++ fcmp.d fr14, fr0
++ fcmp.d fr6, fr8
++ fcmp.d fr8, fr6
++ .global fmov_s
++fmov_s:
++ fmov.s fr15, fr15
++ fmov.s fr0, fr0
++ fmov.s fr15, fr0
++ fmov.s fr0, fr15
++ fmov.s fr8, fr7
++ fmov.s fr7, fr8
++ fmov.s pc, fr15
++ fmov.s r0, fr0
++ fmov.s pc, fr0
++ fmov.s r0, fr15
++ fmov.s r8, fr7
++ fmov.s r7, fr8
++ fmov.s fr15, pc
++ fmov.s fr0, r0
++ fmov.s fr15, r0
++ fmov.s fr0, pc
++ fmov.s fr8, r7
++ fmov.s fr7, r8
++ .global fmov_d
++fmov_d:
++ fmov.d fr14, fr14
++ fmov.d fr0, fr0
++ fmov.d fr14, fr0
++ fmov.d fr0, fr14
++ fmov.d fr8, fr6
++ fmov.d fr6, fr8
++ fmov.d lr, fr14
++ fmov.d r0, fr0
++ fmov.d lr, fr0
++ fmov.d r0, fr14
++ fmov.d r8, fr6
++ fmov.d r6, fr8
++ fmov.d fr14, lr
++ fmov.d fr0, r0
++ fmov.d fr14, r0
++ fmov.d fr0, lr
++ fmov.d fr8, r6
++ fmov.d fr6, r8
++ .global fcasts_d
++fcasts_d:
++ fcasts.d fr15, fr14
++ fcasts.d fr0, fr0
++ fcasts.d fr15, fr0
++ fcasts.d fr0, fr14
++ fcasts.d fr8, fr6
++ fcasts.d fr7, fr8
++ .global fcastd_s
++fcastd_s:
++ fcastd.s fr14, fr15
++ fcastd.s fr0, fr0
++ fcastd.s fr14, fr0
++ fcastd.s fr0, fr15
++ fcastd.s fr8, fr7
++ fcastd.s fr6, fr8
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/hwrd-lwrd.d
+@@ -0,0 +1,47 @@
++#as:
++#objdump: -dr
++#name: hwrd-lwrd
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <test_hwrd>:
++ 0: e0 60 87 65 mov r0,34661
++ 4: e0 60 12 34 mov r0,4660
++ 8: e0 60 00 00 mov r0,0
++ 8: R_AVR32_HI16 \.text\+0x60
++ c: e0 60 00 00 mov r0,0
++ c: R_AVR32_HI16 extsym1
++ 10: ea 10 87 65 orh r0,0x8765
++ 14: ea 10 12 34 orh r0,0x1234
++ 18: ea 10 00 00 orh r0,0x0
++ 18: R_AVR32_HI16 \.text\+0x60
++ 1c: ea 10 00 00 orh r0,0x0
++ 1c: R_AVR32_HI16 extsym1
++ 20: e4 10 87 65 andh r0,0x8765
++ 24: e4 10 12 34 andh r0,0x1234
++ 28: e4 10 00 00 andh r0,0x0
++ 28: R_AVR32_HI16 \.text\+0x60
++ 2c: e4 10 00 00 andh r0,0x0
++ 2c: R_AVR32_HI16 extsym1
++
++00000030 <test_lwrd>:
++ 30: e0 60 43 21 mov r0,17185
++ 34: e0 60 56 78 mov r0,22136
++ 38: e0 60 00 00 mov r0,0
++ 38: R_AVR32_LO16 \.text\+0x60
++ 3c: e0 60 00 00 mov r0,0
++ 3c: R_AVR32_LO16 extsym1
++ 40: e8 10 43 21 orl r0,0x4321
++ 44: e8 10 56 78 orl r0,0x5678
++ 48: e8 10 00 00 orl r0,0x0
++ 48: R_AVR32_LO16 \.text\+0x60
++ 4c: e8 10 00 00 orl r0,0x0
++ 4c: R_AVR32_LO16 extsym1
++ 50: e0 10 43 21 andl r0,0x4321
++ 54: e0 10 56 78 andl r0,0x5678
++ 58: e0 10 00 00 andl r0,0x0
++ 58: R_AVR32_LO16 \.text\+0x60
++ 5c: e0 10 00 00 andl r0,0x0
++ 5c: R_AVR32_LO16 extsym1
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/hwrd-lwrd.s
+@@ -0,0 +1,39 @@
++
++ .equ sym1, 0x12345678
++
++ .text
++ .global test_hwrd
++test_hwrd:
++ mov r0, hi(0x87654321)
++ mov r0, hi(sym1)
++ mov r0, hi(sym2)
++ mov r0, hi(extsym1)
++
++ orh r0, hi(0x87654321)
++ orh r0, hi(sym1)
++ orh r0, hi(sym2)
++ orh r0, hi(extsym1)
++
++ andh r0, hi(0x87654321)
++ andh r0, hi(sym1)
++ andh r0, hi(sym2)
++ andh r0, hi(extsym1)
++
++ .global test_lwrd
++test_lwrd:
++ mov r0, lo(0x87654321)
++ mov r0, lo(sym1)
++ mov r0, lo(sym2)
++ mov r0, lo(extsym1)
++
++ orl r0, lo(0x87654321)
++ orl r0, lo(sym1)
++ orl r0, lo(sym2)
++ orl r0, lo(extsym1)
++
++ andl r0, lo(0x87654321)
++ andl r0, lo(sym1)
++ andl r0, lo(sym2)
++ andl r0, lo(extsym1)
++
++sym2:
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/jmptable.d
+@@ -0,0 +1,20 @@
++#source: jmptable.s
++#as:
++#objdump: -dr
++#name: jmptable
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <jmptable_test>:
++ 0: fe c8 ff f4 sub r8,pc,-12
++ 4: f0 00 00 2f add pc,r8,r0<<0x2
++ 8: d7 03 nop
++ a: 00 00 add r0,r0
++ c: c0 38 rjmp 12 <jmptable_test\+0x12>
++ e: c0 38 rjmp 14 <jmptable_test\+0x14>
++ 10: c0 38 rjmp 16 <jmptable_test\+0x16>
++ 12: d7 03 nop
++ 14: d7 03 nop
++ 16: d7 03 nop
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/jmptable_linkrelax.d
+@@ -0,0 +1,25 @@
++#source: jmptable.s
++#as: --linkrelax
++#objdump: -dr
++#name: jmptable_linkrelax
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <jmptable_test>:
++ 0: fe c8 00 00 sub r8,pc,0
++ 0: R_AVR32_16N_PCREL \.text\+0xc
++ 4: f0 00 00 2f add pc,r8,r0<<0x2
++ 8: d7 03 nop
++ a: 00 00 add r0,r0
++ a: R_AVR32_ALIGN \*ABS\*\+0x2
++ c: c0 08 rjmp c <jmptable_test\+0xc>
++ c: R_AVR32_11H_PCREL \.text\+0x12
++ e: c0 08 rjmp e <jmptable_test\+0xe>
++ e: R_AVR32_11H_PCREL \.text\+0x14
++ 10: c0 08 rjmp 10 <jmptable_test\+0x10>
++ 10: R_AVR32_11H_PCREL \.text\+0x16
++ 12: d7 03 nop
++ 14: d7 03 nop
++ 16: d7 03 nop
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/jmptable.s
+@@ -0,0 +1,14 @@
++
++ .text
++ .global jmptable_test
++jmptable_test:
++ sub r8, pc, -(.L1 - .)
++ add pc, r8, r0 << 2
++ nop
++ .align 2
++.L1: rjmp 1f
++ rjmp 2f
++ rjmp 3f
++1: nop
++2: nop
++3: nop
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/lda_nopic.d
+@@ -0,0 +1,32 @@
++#source: lda.s
++#as:
++#objdump: -dr
++#name: lda_nopic
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <lda_test>:
++ 0: f2 c8 00 00 sub r8,r9,0
++
++00000004 <far_negative>:
++ 4: f6 ca 00 00 sub r10,r11,0
++ ...
++ 8000: fe c0 7f fc sub r0,pc,32764
++ 8004: 48 31 lddpc r1,8010 <far_negative\+0x800c>
++ 8006: 48 42 lddpc r2,8014 <far_negative\+0x8010>
++ 8008: 48 43 lddpc r3,8018 <far_negative\+0x8014>
++ 800a: 48 54 lddpc r4,801c <far_negative\+0x8018>
++ 800c: fe c5 80 04 sub r5,pc,-32764
++ ...
++ 8010: R_AVR32_32_CPENT \.text
++ 8014: R_AVR32_32_CPENT \.data
++ 8018: R_AVR32_32_CPENT undefined
++ 801c: R_AVR32_32_CPENT \.text\+0x1001c
++
++00010008 <far_positive>:
++ 10008: fa cc 00 00 sub r12,sp,0
++ ...
++0001001c <toofar_positive>:
++ 1001c: fe ce 00 00 sub lr,pc,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/lda_nopic_linkrelax.d
+@@ -0,0 +1,41 @@
++#source: lda.s
++#as: --linkrelax
++#objdump: -dr
++#name: lda_nopic_linkrelax
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <lda_test>:
++ 0: f2 c8 00 00 sub r8,r9,0
++
++00000004 <far_negative>:
++ 4: f6 ca 00 00 sub r10,r11,0
++ \.\.\.
++ 8000: 48 00 lddpc r0,8000 <far_negative\+0x7ffc>
++ 8000: R_AVR32_9W_CP \.text\+0x800c
++ 8002: 48 01 lddpc r1,8000 <far_negative\+0x7ffc>
++ 8002: R_AVR32_9W_CP \.text\+0x8010
++ 8004: 48 02 lddpc r2,8004 <far_negative\+0x8000>
++ 8004: R_AVR32_9W_CP \.text\+0x8014
++ 8006: 48 03 lddpc r3,8004 <far_negative\+0x8000>
++ 8006: R_AVR32_9W_CP \.text\+0x8018
++ 8008: 48 04 lddpc r4,8008 <far_negative\+0x8004>
++ 8008: R_AVR32_9W_CP \.text\+0x801c
++ 800a: 48 05 lddpc r5,8008 <far_negative\+0x8004>
++ 800a: R_AVR32_9W_CP \.text\+0x8020
++ \.\.\.
++ 800c: R_AVR32_ALIGN \*ABS\*\+0x2
++ 800c: R_AVR32_32_CPENT \.text\+0x4
++ 8010: R_AVR32_32_CPENT \.text
++ 8014: R_AVR32_32_CPENT \.data
++ 8018: R_AVR32_32_CPENT undefined
++ 801c: R_AVR32_32_CPENT \.text\+0x10020
++ 8020: R_AVR32_32_CPENT \.text\+0x1000c
++
++0001000c <far_positive>:
++ 1000c: fa cc 00 00 sub r12,sp,0
++ \.\.\.
++00010020 <toofar_positive>:
++ 10020: fe ce 00 00 sub lr,pc,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/lda_pic.d
+@@ -0,0 +1,32 @@
++#source: lda.s
++#as: --pic
++#objdump: -dr
++#name: lda_pic
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <lda_test>:
++ 0: f2 c8 00 00 sub r8,r9,0
++
++00000004 <far_negative>:
++ 4: f6 ca 00 00 sub r10,r11,0
++ ...
++ 8000: fe c0 7f fc sub r0,pc,32764
++ 8004: ec f1 00 00 ld.w r1,r6\[0\]
++ 8004: R_AVR32_GOT16S toofar_negative
++ 8008: ec f2 00 00 ld.w r2,r6\[0\]
++ 8008: R_AVR32_GOT16S different_section
++ 800c: ec f3 00 00 ld.w r3,r6\[0\]
++ 800c: R_AVR32_GOT16S undefined
++ 8010: ec f4 00 00 ld.w r4,r6\[0\]
++ 8010: R_AVR32_GOT16S toofar_positive
++ 8014: fe c5 80 14 sub r5,pc,-32748
++ ...
++
++00010000 <far_positive>:
++ 10000: fa cc 00 00 sub r12,sp,0
++ ...
++00010014 <toofar_positive>:
++ 10014: fe ce 00 00 sub lr,pc,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/lda_pic_linkrelax.d
+@@ -0,0 +1,40 @@
++#source: lda.s
++#as: --pic --linkrelax
++#objdump: -dr
++#name: lda_pic_linkrelax
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <lda_test>:
++ 0: f2 c8 00 00 sub r8,r9,0
++
++00000004 <far_negative>:
++ 4: f6 ca 00 00 sub r10,r11,0
++ ...
++ 8000: e0 60 00 00 mov r0,0
++ 8000: R_AVR32_LDA_GOT far_negative
++ 8004: ec 00 03 20 ld\.w r0,r6\[r0<<0x2\]
++ 8008: e0 61 00 00 mov r1,0
++ 8008: R_AVR32_LDA_GOT toofar_negative
++ 800c: ec 01 03 21 ld\.w r1,r6\[r1<<0x2\]
++ 8010: e0 62 00 00 mov r2,0
++ 8010: R_AVR32_LDA_GOT different_section
++ 8014: ec 02 03 22 ld\.w r2,r6\[r2<<0x2\]
++ 8018: e0 63 00 00 mov r3,0
++ 8018: R_AVR32_LDA_GOT undefined
++ 801c: ec 03 03 23 ld\.w r3,r6\[r3<<0x2\]
++ 8020: e0 64 00 00 mov r4,0
++ 8020: R_AVR32_LDA_GOT toofar_positive
++ 8024: ec 04 03 24 ld\.w r4,r6\[r4<<0x2\]
++ 8028: e0 65 00 00 mov r5,0
++ 8028: R_AVR32_LDA_GOT far_positive
++ 802c: ec 05 03 25 ld\.w r5,r6\[r5<<0x2\]
++ ...
++
++00010018 <far_positive>:
++ 10018: fa cc 00 00 sub r12,sp,0
++ ...
++0001002c <toofar_positive>:
++ 1002c: fe ce 00 00 sub lr,pc,0
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/lda.s
+@@ -0,0 +1,30 @@
++
++ .text
++ .global lda_test
++lda_test:
++toofar_negative:
++ sub r8, r9, 0
++far_negative:
++ sub r10, r11, 0
++
++ .fill 32760, 1, 0x00
++
++ lda.w r0, far_negative
++ lda.w r1, toofar_negative
++ lda.w r2, different_section
++ lda.w r3, undefined
++ lda.w r4, toofar_positive
++ lda.w r5, far_positive
++
++ .cpool
++
++ .fill 32744, 1, 0x00
++far_positive:
++ sub r12, sp, 0
++ .fill 16, 1, 0x00
++toofar_positive:
++ sub lr, pc, 0
++
++ .data
++different_section:
++ .long 0x12345678
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/pcrel.d
+@@ -0,0 +1,64 @@
++#as:
++#objdump: -dr
++#name: pcrel
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <test_rjmp>:
++ 0: d7 03 nop
++ 2: c0 28 rjmp 6 <test_rjmp\+0x6>
++ 4: d7 03 nop
++ 6: e0 8f 00 00 bral 6 <test_rjmp\+0x6>
++ 6: R_AVR32_22H_PCREL extsym10
++
++0000000a <test_rcall>:
++ a: d7 03 nop
++0000000c <test_rcall2>:
++ c: c0 2c rcall 10 <test_rcall2\+0x4>
++ e: d7 03 nop
++ 10: e0 a0 00 00 rcall 10 <test_rcall2\+0x4>
++ 10: R_AVR32_22H_PCREL extsym21
++
++00000014 <test_branch>:
++ 14: c0 31 brne 1a <test_branch\+0x6>
++ 16: e0 8f 00 00 bral 16 <test_branch\+0x2>
++ 16: R_AVR32_22H_PCREL test_branch
++ 1a: e0 80 00 00 breq 1a <test_branch\+0x6>
++ 1a: R_AVR32_22H_PCREL extsym21
++
++0000001e <test_lddpc>:
++ 1e: 48 30 lddpc r0,28 <sym1>
++ 20: 48 20 lddpc r0,28 <sym1>
++ 22: fe f0 00 00 ld.w r0,pc\[0\]
++ 22: R_AVR32_16B_PCREL extsym16
++ \.\.\.
++
++00000028 <sym1>:
++ 28: d7 03 nop
++ 2a: d7 03 nop
++
++0000002c <test_local>:
++ 2c: 48 20 lddpc r0,34 <test_local\+0x8>
++ 2e: 48 30 lddpc r0,38 <test_local\+0xc>
++ 30: 48 20 lddpc r0,38 <test_local\+0xc>
++ 32: 00 00 add r0,r0
++ 34: d7 03 nop
++ 36: d7 03 nop
++ 38: d7 03 nop
++ 3a: d7 03 nop
++
++Disassembly of section \.text\.init:
++
++00000000 <test_inter_section>:
++ 0: e0 a0 .. .. rcall [0-9a-f]+ <.*>
++ 0: R_AVR32_22H_PCREL test_rcall
++ 4: d7 03 nop
++ 6: e0 a0 .. .. rcall [0-9a-f]+ <.*>
++ 6: R_AVR32_22H_PCREL test_rcall
++ a: e0 a0 .. .. rcall [0-9a-z]+ <.*>
++ a: R_AVR32_22H_PCREL \.text\+0xc
++ e: d7 03 nop
++ 10: e0 a0 .. .. rcall [0-9a-f]+ <.*>
++ 10: R_AVR32_22H_PCREL \.text\+0xc
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/pcrel.s
+@@ -0,0 +1,57 @@
++
++ .text
++ .global test_rjmp
++test_rjmp:
++ nop
++ rjmp 0f
++ nop
++0: rjmp extsym10
++
++ .global test_rcall
++test_rcall:
++ nop
++test_rcall2:
++ rcall 0f
++ nop
++0: rcall extsym21
++
++ .global test_branch
++test_branch:
++ brne 0f
++ /* This will generate a reloc since test_branch is global */
++ bral test_branch
++0: breq extsym21
++
++ .global test_lddpc
++test_lddpc:
++ lddpc r0,sym1
++ lddpc r0,sym1
++ lddpc r0,extsym16
++
++ .align 2
++sym1: nop
++ nop
++
++ .global test_local
++test_local:
++ lddpc r0, .LC1
++ lddpc r0, .LC2
++ lddpc r0, .LC1 + 0x4
++
++ .align 2
++.LC1:
++ nop
++ nop
++.LC2:
++ nop
++ nop
++
++ .section .text.init,"ax"
++ .global test_inter_section
++test_inter_section:
++ rcall test_rcall
++ nop
++ rcall test_rcall
++ rcall test_rcall2
++ nop
++ rcall test_rcall2
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/pico.d
+@@ -0,0 +1,149 @@
++#as:
++#objdump: -dr
++#name: pico
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++[0-9a-f]* <picosvmac>:
++ *[0-9a-f]*: e1 a6 20 00 cop cp1,cr0,cr0,cr0,0xc
++ *[0-9a-f]*: e1 a7 2b bb cop cp1,cr11,cr11,cr11,0xe
++ *[0-9a-f]*: e1 a6 3a 05 cop cp1,cr10,cr0,cr5,0xd
++ *[0-9a-f]*: e1 a7 36 90 cop cp1,cr6,cr9,cr0,0xf
++
++[0-9a-f]* <picosvmul>:
++ *[0-9a-f]*: e1 a4 20 00 cop cp1,cr0,cr0,cr0,0x8
++ *[0-9a-f]*: e1 a5 2b bb cop cp1,cr11,cr11,cr11,0xa
++ *[0-9a-f]*: e1 a4 3a 05 cop cp1,cr10,cr0,cr5,0x9
++ *[0-9a-f]*: e1 a5 36 90 cop cp1,cr6,cr9,cr0,0xb
++
++[0-9a-f]* <picovmac>:
++ *[0-9a-f]*: e1 a2 20 00 cop cp1,cr0,cr0,cr0,0x4
++ *[0-9a-f]*: e1 a3 2b bb cop cp1,cr11,cr11,cr11,0x6
++ *[0-9a-f]*: e1 a2 3a 05 cop cp1,cr10,cr0,cr5,0x5
++ *[0-9a-f]*: e1 a3 36 90 cop cp1,cr6,cr9,cr0,0x7
++
++[0-9a-f]* <picovmul>:
++ *[0-9a-f]*: e1 a0 20 00 cop cp1,cr0,cr0,cr0,0x0
++ *[0-9a-f]*: e1 a1 2b bb cop cp1,cr11,cr11,cr11,0x2
++ *[0-9a-f]*: e1 a0 3a 05 cop cp1,cr10,cr0,cr5,0x1
++ *[0-9a-f]*: e1 a1 36 90 cop cp1,cr6,cr9,cr0,0x3
++
++[0-9a-f]* <picold_d>:
++ *[0-9a-f]*: e9 af 3e ff ldc\.d cp1,cr14,pc\[0x3fc\]
++ *[0-9a-f]*: e9 a0 30 ff ldc\.d cp1,cr0,r0\[0x3fc\]
++ *[0-9a-f]*: e9 a0 30 00 ldc\.d cp1,cr0,r0\[0x0\]
++ *[0-9a-f]*: ef a8 26 50 ldc\.d cp1,cr6,--r8
++ *[0-9a-f]*: ef a7 28 50 ldc\.d cp1,cr8,--r7
++ *[0-9a-f]*: ef aa 32 65 ldc\.d cp1,cr2,r10\[r5<<0x2\]
++ *[0-9a-f]*: ef a3 3c 46 ldc\.d cp1,cr12,r3\[r6\]
++
++[0-9a-f]* <picold_w>:
++ *[0-9a-f]*: e9 af 2f ff ldc\.w cp1,cr15,pc\[0x3fc\]
++ *[0-9a-f]*: e9 a0 20 ff ldc\.w cp1,cr0,r0\[0x3fc\]
++ *[0-9a-f]*: e9 a0 20 00 ldc\.w cp1,cr0,r0\[0x0\]
++ *[0-9a-f]*: ef a8 27 40 ldc\.w cp1,cr7,--r8
++ *[0-9a-f]*: ef a7 28 40 ldc\.w cp1,cr8,--r7
++ *[0-9a-f]*: ef aa 31 25 ldc\.w cp1,cr1,r10\[r5<<0x2\]
++ *[0-9a-f]*: ef a3 3d 06 ldc\.w cp1,cr13,r3\[r6\]
++
++[0-9a-f]* <picoldm_d>:
++ *[0-9a-f]*: ed af 24 ff ldcm\.d cp1,pc,cr0-cr15
++ *[0-9a-f]*: ed a0 24 01 ldcm\.d cp1,r0,cr0-cr1
++ *[0-9a-f]*: ed a7 24 80 ldcm\.d cp1,r7,cr14-cr15
++ *[0-9a-f]*: ed a8 24 7f ldcm\.d cp1,r8,cr0-cr13
++
++[0-9a-f]* <picoldm_d_pu>:
++ *[0-9a-f]*: ed af 34 ff ldcm\.d cp1,pc\+\+,cr0-cr15
++ *[0-9a-f]*: ed a0 34 01 ldcm\.d cp1,r0\+\+,cr0-cr1
++ *[0-9a-f]*: ed a7 34 80 ldcm\.d cp1,r7\+\+,cr14-cr15
++ *[0-9a-f]*: ed a8 34 7f ldcm\.d cp1,r8\+\+,cr0-cr13
++
++[0-9a-f]* <picoldm_w>:
++ *[0-9a-f]*: ed af 20 ff ldcm\.w cp1,pc,cr0-cr7
++ *[0-9a-f]*: ed a0 20 01 ldcm\.w cp1,r0,cr0
++ *[0-9a-f]*: ed a7 20 80 ldcm\.w cp1,r7,cr7
++ *[0-9a-f]*: ed a8 20 7f ldcm\.w cp1,r8,cr0-cr6
++ *[0-9a-f]*: ed af 21 ff ldcm\.w cp1,pc,cr8-cr15
++ *[0-9a-f]*: ed a0 21 01 ldcm\.w cp1,r0,cr8
++ *[0-9a-f]*: ed a7 21 80 ldcm\.w cp1,r7,cr15
++ *[0-9a-f]*: ed a8 21 7f ldcm\.w cp1,r8,cr8-cr14
++
++[0-9a-f]* <picoldm_w_pu>:
++ *[0-9a-f]*: ed af 30 ff ldcm\.w cp1,pc\+\+,cr0-cr7
++ *[0-9a-f]*: ed a0 30 01 ldcm\.w cp1,r0\+\+,cr0
++ *[0-9a-f]*: ed a7 30 80 ldcm\.w cp1,r7\+\+,cr7
++ *[0-9a-f]*: ed a8 30 7f ldcm\.w cp1,r8\+\+,cr0-cr6
++ *[0-9a-f]*: ed af 31 ff ldcm\.w cp1,pc\+\+,cr8-cr15
++ *[0-9a-f]*: ed a0 31 01 ldcm\.w cp1,r0\+\+,cr8
++ *[0-9a-f]*: ed a7 31 80 ldcm\.w cp1,r7\+\+,cr15
++ *[0-9a-f]*: ed a8 31 7f ldcm\.w cp1,r8\+\+,cr8-cr14
++
++[0-9a-f]* <picomv_d>:
++ *[0-9a-f]*: ef ae 2e 30 mvrc\.d cp1,cr14,lr
++ *[0-9a-f]*: ef a0 20 30 mvrc\.d cp1,cr0,r0
++ *[0-9a-f]*: ef a8 26 30 mvrc\.d cp1,cr6,r8
++ *[0-9a-f]*: ef a6 28 30 mvrc\.d cp1,cr8,r6
++ *[0-9a-f]*: ef ae 2e 10 mvcr\.d cp1,lr,cr14
++ *[0-9a-f]*: ef a0 20 10 mvcr\.d cp1,r0,cr0
++ *[0-9a-f]*: ef a8 26 10 mvcr\.d cp1,r8,cr6
++ *[0-9a-f]*: ef a6 28 10 mvcr\.d cp1,r6,cr8
++
++[0-9a-f]* <picomv_w>:
++ *[0-9a-f]*: ef af 2f 20 mvrc\.w cp1,cr15,pc
++ *[0-9a-f]*: ef a0 20 20 mvrc\.w cp1,cr0,r0
++ *[0-9a-f]*: ef a8 27 20 mvrc\.w cp1,cr7,r8
++ *[0-9a-f]*: ef a7 28 20 mvrc\.w cp1,cr8,r7
++ *[0-9a-f]*: ef af 2f 00 mvcr\.w cp1,pc,cr15
++ *[0-9a-f]*: ef a0 20 00 mvcr\.w cp1,r0,cr0
++ *[0-9a-f]*: ef a8 27 00 mvcr\.w cp1,r8,cr7
++ *[0-9a-f]*: ef a7 28 00 mvcr\.w cp1,r7,cr8
++
++[0-9a-f]* <picost_d>:
++ *[0-9a-f]*: eb af 3e ff stc\.d cp1,pc\[0x3fc\],cr14
++ *[0-9a-f]*: eb a0 30 00 stc\.d cp1,r0\[0x0\],cr0
++ *[0-9a-f]*: ef a8 26 70 stc\.d cp1,r8\+\+,cr6
++ *[0-9a-f]*: ef a7 28 70 stc\.d cp1,r7\+\+,cr8
++ *[0-9a-f]*: ef aa 32 e5 stc\.d cp1,r10\[r5<<0x2\],cr2
++ *[0-9a-f]*: ef a3 3c c6 stc\.d cp1,r3\[r6\],cr12
++
++[0-9a-f]* <picost_w>:
++ *[0-9a-f]*: eb af 2f ff stc\.w cp1,pc\[0x3fc\],cr15
++ *[0-9a-f]*: eb a0 20 00 stc\.w cp1,r0\[0x0\],cr0
++ *[0-9a-f]*: ef a8 27 60 stc\.w cp1,r8\+\+,cr7
++ *[0-9a-f]*: ef a7 28 60 stc\.w cp1,r7\+\+,cr8
++ *[0-9a-f]*: ef aa 31 a5 stc\.w cp1,r10\[r5<<0x2\],cr1
++ *[0-9a-f]*: ef a3 3d 86 stc\.w cp1,r3\[r6\],cr13
++
++[0-9a-f]* <picostm_d>:
++ *[0-9a-f]*: ed af 25 ff stcm\.d cp1,pc,cr0-cr15
++ *[0-9a-f]*: ed a0 25 01 stcm\.d cp1,r0,cr0-cr1
++ *[0-9a-f]*: ed a7 25 80 stcm\.d cp1,r7,cr14-cr15
++ *[0-9a-f]*: ed a8 25 7f stcm\.d cp1,r8,cr0-cr13
++
++[0-9a-f]* <picostm_d_pu>:
++ *[0-9a-f]*: ed af 35 ff stcm\.d cp1,--pc,cr0-cr15
++ *[0-9a-f]*: ed a0 35 01 stcm\.d cp1,--r0,cr0-cr1
++ *[0-9a-f]*: ed a7 35 80 stcm\.d cp1,--r7,cr14-cr15
++ *[0-9a-f]*: ed a8 35 7f stcm\.d cp1,--r8,cr0-cr13
++
++[0-9a-f]* <picostm_w>:
++ *[0-9a-f]*: ed af 22 ff stcm\.w cp1,pc,cr0-cr7
++ *[0-9a-f]*: ed a0 22 01 stcm\.w cp1,r0,cr0
++ *[0-9a-f]*: ed a7 22 80 stcm\.w cp1,r7,cr7
++ *[0-9a-f]*: ed a8 22 7f stcm\.w cp1,r8,cr0-cr6
++ *[0-9a-f]*: ed af 23 ff stcm\.w cp1,pc,cr8-cr15
++ *[0-9a-f]*: ed a0 23 01 stcm\.w cp1,r0,cr8
++ *[0-9a-f]*: ed a7 23 80 stcm\.w cp1,r7,cr15
++ *[0-9a-f]*: ed a8 23 7f stcm\.w cp1,r8,cr8-cr14
++
++[0-9a-f]* <picostm_w_pu>:
++ *[0-9a-f]*: ed af 32 ff stcm\.w cp1,--pc,cr0-cr7
++ *[0-9a-f]*: ed a0 32 01 stcm\.w cp1,--r0,cr0
++ *[0-9a-f]*: ed a7 32 80 stcm\.w cp1,--r7,cr7
++ *[0-9a-f]*: ed a8 32 7f stcm\.w cp1,--r8,cr0-cr6
++ *[0-9a-f]*: ed af 33 ff stcm\.w cp1,--pc,cr8-cr15
++ *[0-9a-f]*: ed a0 33 01 stcm\.w cp1,--r0,cr8
++ *[0-9a-f]*: ed a7 33 80 stcm\.w cp1,--r7,cr15
++ *[0-9a-f]*: ed a8 33 7f stcm\.w cp1,--r8,cr8-cr14
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/pico.s
+@@ -0,0 +1,144 @@
++
++ .text
++ .global picosvmac
++picosvmac:
++ picosvmac out0, in0, in0, in0
++ picosvmac out2, in11, in11, in11
++ picosvmac out1, in10, in0, in5
++ picosvmac out3, in6, in9, in0
++ .global picosvmul
++picosvmul:
++ picosvmul out0, in0, in0, in0
++ picosvmul out2, in11, in11, in11
++ picosvmul out1, in10, in0, in5
++ picosvmul out3, in6, in9, in0
++ .global picovmac
++picovmac:
++ picovmac out0, in0, in0, in0
++ picovmac out2, in11, in11, in11
++ picovmac out1, in10, in0, in5
++ picovmac out3, in6, in9, in0
++ .global picovmul
++picovmul:
++ picovmul out0, in0, in0, in0
++ picovmul out2, in11, in11, in11
++ picovmul out1, in10, in0, in5
++ picovmul out3, in6, in9, in0
++ .global picold_d
++picold_d:
++ picold.d vmu2_out, pc[1020]
++ picold.d inpix2, r0[1020]
++ picold.d inpix2, r0[0]
++ picold.d coeff0_a, --r8
++ picold.d coeff1_a, --r7
++ picold.d inpix0, r10[r5 << 2]
++ picold.d vmu0_out, r3[r6 << 0]
++ .global picold_w
++picold_w:
++ picold.w config, pc[1020]
++ picold.w inpix2, r0[1020]
++ picold.w inpix2, r0[0]
++ picold.w coeff0_b, --r8
++ picold.w coeff1_a, --r7
++ picold.w inpix1, r10[r5 << 2]
++ picold.w vmu1_out, r3[r6 << 0]
++ .global picoldm_d
++picoldm_d:
++ picoldm.d pc, inpix2-config
++ picoldm.d r0, inpix2, inpix1
++ picoldm.d r7, vmu2_out, config
++ picoldm.d r8, inpix2-vmu1_out
++ .global picoldm_d_pu
++picoldm_d_pu:
++ picoldm.d pc++, inpix2, inpix1, inpix0, outpix2, outpix1, outpix0, coeff0_a, coeff0_b, coeff1_a, coeff1_b, coeff2_a, coeff2_b, vmu0_out, vmu1_out, vmu2_out, config
++ picoldm.d r0++, inpix2, inpix1
++ picoldm.d r7++, vmu2_out, config
++ picoldm.d r8++, inpix2, inpix1, inpix0, outpix2, outpix1, outpix0, coeff0_a, coeff0_b, coeff1_a, coeff1_b, coeff2_a, coeff2_b, vmu0_out, vmu1_out
++ .global picoldm_w
++picoldm_w:
++ picoldm.w pc, inpix2-coeff0_b
++ picoldm.w r0, inpix2
++ picoldm.w r7, coeff0_b
++ picoldm.w r8, inpix2-coeff0_a
++ picoldm.w pc, coeff1_a-config
++ picoldm.w r0, coeff1_a
++ picoldm.w r7, config
++ picoldm.w r8, coeff1_a-vmu2_out
++ .global picoldm_w_pu
++picoldm_w_pu:
++ picoldm.w pc++, inpix2-coeff0_b
++ picoldm.w r0++, inpix2
++ picoldm.w r7++, coeff0_b
++ picoldm.w r8++, inpix2-coeff0_a
++ picoldm.w pc++, coeff1_a-config
++ picoldm.w r0++, coeff1_a
++ picoldm.w r7++, config
++ picoldm.w r8++, coeff1_a-vmu2_out
++ .global picomv_d
++picomv_d:
++ picomv.d vmu2_out, lr
++ picomv.d inpix2, r0
++ picomv.d coeff0_a, r8
++ picomv.d coeff1_a, r6
++ picomv.d pc, vmu2_out
++ picomv.d r0, inpix2
++ picomv.d r8, coeff0_a
++ picomv.d r6, coeff1_a
++ .global picomv_w
++picomv_w:
++ picomv.w config, pc
++ picomv.w inpix2, r0
++ picomv.w coeff0_b, r8
++ picomv.w coeff1_a, r7
++ picomv.w pc, config
++ picomv.w r0, inpix2
++ picomv.w r8, coeff0_b
++ picomv.w r7, coeff1_a
++ .global picost_d
++picost_d:
++ picost.d pc[1020], vmu2_out
++ picost.d r0[0], inpix2
++ picost.d r8++, coeff0_a
++ picost.d r7++, coeff1_a
++ picost.d r10[r5 << 2], inpix0
++ picost.d r3[r6 << 0], vmu0_out
++ .global picost_w
++picost_w:
++ picost.w pc[1020], config
++ picost.w r0[0], inpix2
++ picost.w r8++, coeff0_b
++ picost.w r7++, coeff1_a
++ picost.w r10[r5 << 2], inpix1
++ picost.w r3[r6 << 0], vmu1_out
++ .global picostm_d
++picostm_d:
++ picostm.d pc, inpix2-config
++ picostm.d r0, inpix2, inpix1
++ picostm.d r7, vmu2_out, config
++ picostm.d r8, inpix2-vmu1_out
++ .global picostm_d_pu
++picostm_d_pu:
++ picostm.d --pc, inpix2, inpix1, inpix0, outpix2, outpix1, outpix0, coeff0_a, coeff0_b, coeff1_a, coeff1_b, coeff2_a, coeff2_b, vmu0_out, vmu1_out, vmu2_out, config
++ picostm.d --r0, inpix2, inpix1
++ picostm.d --r7, vmu2_out, config
++ picostm.d --r8, inpix2, inpix1, inpix0, outpix2, outpix1, outpix0, coeff0_a, coeff0_b, coeff1_a, coeff1_b, coeff2_a, coeff2_b, vmu0_out, vmu1_out
++ .global picostm_w
++picostm_w:
++ picostm.w pc, inpix2-coeff0_b
++ picostm.w r0, inpix2
++ picostm.w r7, coeff0_b
++ picostm.w r8, inpix2-coeff0_a
++ picostm.w pc, coeff1_a-config
++ picostm.w r0, coeff1_a
++ picostm.w r7, config
++ picostm.w r8, coeff1_a-vmu2_out
++ .global picostm_w_pu
++picostm_w_pu:
++ picostm.w --pc, inpix2-coeff0_b
++ picostm.w --r0, inpix2
++ picostm.w --r7, coeff0_b
++ picostm.w --r8, inpix2-coeff0_a
++ picostm.w --pc, coeff1_a-config
++ picostm.w --r0, coeff1_a
++ picostm.w --r7, config
++ picostm.w --r8, coeff1_a-vmu2_out
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/pic_reloc.d
+@@ -0,0 +1,27 @@
++#as:
++#objdump: -dr
++#name: pic_reloc
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <mcall_got>:
++ 0: f0 16 00 00 mcall r6\[0\]
++ 0: R_AVR32_GOT18SW extfunc
++ 4: f0 16 00 00 mcall r6\[0\]
++ 4: R_AVR32_GOT18SW \.L1
++ 8: f0 16 00 00 mcall r6\[0\]
++ 8: R_AVR32_GOT18SW \.L2
++ c: f0 16 00 00 mcall r6\[0\]
++ c: R_AVR32_GOT18SW mcall_got
++
++00000010 <ldw_got>:
++ 10: ec f0 00 00 ld.w r0,r6\[0\]
++ 10: R_AVR32_GOT16S extvar
++ 14: ec f0 00 00 ld.w r0,r6\[0\]
++ 14: R_AVR32_GOT16S \.L3
++ 18: ec f0 00 00 ld.w r0,r6\[0\]
++ 18: R_AVR32_GOT16S \.L4
++ 1c: ec f0 00 00 ld.w r0,r6\[0\]
++ 1c: R_AVR32_GOT16S ldw_got
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/pic_reloc.s
+@@ -0,0 +1,18 @@
++
++ .text
++ .global mcall_got
++mcall_got:
++.L1:
++ mcall r6[extfunc@got]
++ mcall r6[.L1@got]
++ mcall r6[.L2@got]
++ mcall r6[mcall_got@got]
++.L2:
++
++ .global ldw_got
++ldw_got:
++.L3: ld.w r0,r6[extvar@got]
++ ld.w r0,r6[.L3@got]
++ ld.w r0,r6[.L4@got]
++ ld.w r0,r6[ldw_got@got]
++.L4:
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/symdiff.d
+@@ -0,0 +1,24 @@
++#source: symdiff.s
++#as:
++#objdump: -dr
++#name: symdiff
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <diff32>:
++ 0: 00 00 add r0,r0
++ 2: 00 04 add r4,r0
++
++00000004 <diff16>:
++ 4: 00 04 add r4,r0
++
++00000006 <diff8>:
++ 6: 04 00 add r0,r2
++
++00000008 <symdiff_test>:
++ 8: d7 03 nop
++ a: d7 03 nop
++ c: d7 03 nop
++ e: d7 03 nop
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/symdiff_linkrelax.d
+@@ -0,0 +1,28 @@
++#source: symdiff.s
++#as: --linkrelax
++#objdump: -dr
++#name: symdiff_linkrelax
++
++.*: +file format .*
++
++Disassembly of section \.text:
++
++00000000 <diff32>:
++ 0: 00 00 add r0,r0
++ 0: R_AVR32_DIFF32 \.text\+0xa
++ 2: 00 04 add r4,r0
++
++00000004 <diff16>:
++ 4: 00 04 add r4,r0
++ 4: R_AVR32_DIFF16 \.text\+0xa
++
++00000006 <diff8>:
++ 6: 04 00 add r0,r2
++ 6: R_AVR32_DIFF8 \.text\+0xa
++ 7: R_AVR32_ALIGN \*ABS\*\+0x1
++
++00000008 <symdiff_test>:
++ 8: d7 03 nop
++ a: d7 03 nop
++ c: d7 03 nop
++ e: d7 03 nop
+--- /dev/null
++++ b/gas/testsuite/gas/avr32/symdiff.s
+@@ -0,0 +1,19 @@
++
++ .text
++ .global diff32
++diff32:
++ .long .L2 - .L1
++ .global diff16
++diff16:
++ .short .L2 - .L1
++ .global diff8
++diff8:
++ .byte .L2 - .L1
++
++ .global symdiff_test
++ .align 1
++symdiff_test:
++ nop
++.L1: nop
++ nop
++.L2: nop
+--- a/gas/write.c
++++ b/gas/write.c
+@@ -2011,6 +2011,10 @@ relax_frag (segT segment, fragS *fragP,
+
+ #endif /* defined (TC_GENERIC_RELAX_TABLE) */
+
++#ifdef TC_RELAX_ALIGN
++#define RELAX_ALIGN(SEG, FRAG, ADDR) TC_RELAX_ALIGN(SEG, FRAG, ADDR)
++#else
++#define RELAX_ALIGN(SEG, FRAG, ADDR) relax_align(ADDR, (FRAG)->fr_offset)
+ /* Relax_align. Advance location counter to next address that has 'alignment'
+ lowest order bits all 0s, return size of adjustment made. */
+ static relax_addressT
+@@ -2030,6 +2034,7 @@ relax_align (register relax_addressT add
+ #endif
+ return (new_address - address);
+ }
++#endif
+
+ /* Now we have a segment, not a crowd of sub-segments, we can make
+ fr_address values.
+@@ -2073,7 +2078,7 @@ relax_segment (struct frag *segment_frag
+ case rs_align_code:
+ case rs_align_test:
+ {
+- addressT offset = relax_align (address, (int) fragP->fr_offset);
++ addressT offset = RELAX_ALIGN(segment, fragP, address);
+
+ if (fragP->fr_subtype != 0 && offset > fragP->fr_subtype)
+ offset = 0;
+@@ -2280,10 +2285,10 @@ relax_segment (struct frag *segment_frag
+ {
+ addressT oldoff, newoff;
+
+- oldoff = relax_align (was_address + fragP->fr_fix,
+- (int) offset);
+- newoff = relax_align (address + fragP->fr_fix,
+- (int) offset);
++ oldoff = RELAX_ALIGN (segment, fragP,
++ was_address + fragP->fr_fix);
++ newoff = RELAX_ALIGN (segment, fragP,
++ address + fragP->fr_fix);
+
+ if (fragP->fr_subtype != 0)
+ {
+--- a/include/dis-asm.h
++++ b/include/dis-asm.h
+@@ -222,6 +222,7 @@ typedef int (*disassembler_ftype) (bfd_v
+
+ extern int print_insn_alpha (bfd_vma, disassemble_info *);
+ extern int print_insn_avr (bfd_vma, disassemble_info *);
++extern int print_insn_avr32 (bfd_vma, disassemble_info *);
+ extern int print_insn_bfin (bfd_vma, disassemble_info *);
+ extern int print_insn_big_arm (bfd_vma, disassemble_info *);
+ extern int print_insn_big_mips (bfd_vma, disassemble_info *);
+@@ -304,7 +305,9 @@ extern void print_i386_disassembler_opti
+ extern void print_mips_disassembler_options (FILE *);
+ extern void print_ppc_disassembler_options (FILE *);
+ extern void print_arm_disassembler_options (FILE *);
++extern void print_avr32_disassembler_options (FILE *);
+ extern void parse_arm_disassembler_option (char *);
++extern void parse_avr32_disassembler_option (char *);
+ extern void print_s390_disassembler_options (FILE *);
+ extern int get_arm_regname_num_options (void);
+ extern int set_arm_regname_option (int);
+--- /dev/null
++++ b/include/elf/avr32.h
+@@ -0,0 +1,98 @@
++/* AVR32 ELF support for BFD.
++ Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#include "elf/reloc-macros.h"
++
++/* CPU-specific flags for the ELF header e_flags field */
++#define EF_AVR32_LINKRELAX 0x01
++#define EF_AVR32_PIC 0x02
++
++START_RELOC_NUMBERS (elf_avr32_reloc_type)
++ RELOC_NUMBER (R_AVR32_NONE, 0)
++
++ /* Data Relocations */
++ RELOC_NUMBER (R_AVR32_32, 1)
++ RELOC_NUMBER (R_AVR32_16, 2)
++ RELOC_NUMBER (R_AVR32_8, 3)
++ RELOC_NUMBER (R_AVR32_32_PCREL, 4)
++ RELOC_NUMBER (R_AVR32_16_PCREL, 5)
++ RELOC_NUMBER (R_AVR32_8_PCREL, 6)
++ RELOC_NUMBER (R_AVR32_DIFF32, 7)
++ RELOC_NUMBER (R_AVR32_DIFF16, 8)
++ RELOC_NUMBER (R_AVR32_DIFF8, 9)
++ RELOC_NUMBER (R_AVR32_GOT32, 10)
++ RELOC_NUMBER (R_AVR32_GOT16, 11)
++ RELOC_NUMBER (R_AVR32_GOT8, 12)
++
++ /* Normal Code Relocations */
++ RELOC_NUMBER (R_AVR32_21S, 13)
++ RELOC_NUMBER (R_AVR32_16U, 14)
++ RELOC_NUMBER (R_AVR32_16S, 15)
++ RELOC_NUMBER (R_AVR32_8S, 16)
++ RELOC_NUMBER (R_AVR32_8S_EXT, 17)
++
++ /* PC-Relative Code Relocations */
++ RELOC_NUMBER (R_AVR32_22H_PCREL, 18)
++ RELOC_NUMBER (R_AVR32_18W_PCREL, 19)
++ RELOC_NUMBER (R_AVR32_16B_PCREL, 20)
++ RELOC_NUMBER (R_AVR32_16N_PCREL, 21)
++ RELOC_NUMBER (R_AVR32_14UW_PCREL, 22)
++ RELOC_NUMBER (R_AVR32_11H_PCREL, 23)
++ RELOC_NUMBER (R_AVR32_10UW_PCREL, 24)
++ RELOC_NUMBER (R_AVR32_9H_PCREL, 25)
++ RELOC_NUMBER (R_AVR32_9UW_PCREL, 26)
++
++ /* Special Code Relocations */
++ RELOC_NUMBER (R_AVR32_HI16, 27)
++ RELOC_NUMBER (R_AVR32_LO16, 28)
++
++ /* PIC Relocations */
++ RELOC_NUMBER (R_AVR32_GOTPC, 29)
++ RELOC_NUMBER (R_AVR32_GOTCALL, 30)
++ RELOC_NUMBER (R_AVR32_LDA_GOT, 31)
++ RELOC_NUMBER (R_AVR32_GOT21S, 32)
++ RELOC_NUMBER (R_AVR32_GOT18SW, 33)
++ RELOC_NUMBER (R_AVR32_GOT16S, 34)
++ RELOC_NUMBER (R_AVR32_GOT7UW, 35)
++
++ /* Constant Pool Relocations */
++ RELOC_NUMBER (R_AVR32_32_CPENT, 36)
++ RELOC_NUMBER (R_AVR32_CPCALL, 37)
++ RELOC_NUMBER (R_AVR32_16_CP, 38)
++ RELOC_NUMBER (R_AVR32_9W_CP, 39)
++
++ /* Dynamic Relocations */
++ RELOC_NUMBER (R_AVR32_RELATIVE, 40)
++ RELOC_NUMBER (R_AVR32_GLOB_DAT, 41)
++ RELOC_NUMBER (R_AVR32_JMP_SLOT, 42)
++
++ /* Linkrelax Information */
++ RELOC_NUMBER (R_AVR32_ALIGN, 43)
++
++ RELOC_NUMBER (R_AVR32_15S, 44)
++
++END_RELOC_NUMBERS (R_AVR32_max)
++
++/* Processor specific dynamic array tags. */
++
++/* The total size in bytes of the Global Offset Table */
++#define DT_AVR32_GOTSZ 0x70000001
+--- a/include/elf/common.h
++++ b/include/elf/common.h
+@@ -286,7 +286,7 @@
+ #define EM_INTEL182 182 /* Reserved by Intel */
+ #define EM_res183 183 /* Reserved by ARM */
+ #define EM_res184 184 /* Reserved by ARM */
+-#define EM_AVR32 185 /* Atmel Corporation 32-bit microprocessor family */
++#define EM_AVR32_OLD 185 /* Atmel Corporation 32-bit microprocessor family */
+ #define EM_STM8 186 /* STMicroeletronics STM8 8-bit microcontroller */
+ #define EM_TILE64 187 /* Tilera TILE64 multicore architecture family */
+ #define EM_TILEPRO 188 /* Tilera TILEPro multicore architecture family */
+@@ -365,6 +365,9 @@
+ /* V850 backend magic number. Written in the absense of an ABI. */
+ #define EM_CYGNUS_V850 0x9080
+
++/* AVR32 magic number, picked by IAR Systems. */
++#define EM_AVR32 0x18ad
++
+ /* old S/390 backend magic number. Written in the absence of an ABI. */
+ #define EM_S390_OLD 0xa390
+
+--- a/ld/configdoc.texi
++++ b/ld/configdoc.texi
+@@ -7,6 +7,7 @@
+ @set H8300
+ @set HPPA
+ @set I960
++@set AVR32
+ @set M68HC11
+ @set M68K
+ @set MMIX
+--- a/ld/configure.tgt
++++ b/ld/configure.tgt
+@@ -112,6 +112,9 @@ xscale-*-elf) targ_emul=armelf
+ avr-*-*) targ_emul=avr2
+ targ_extra_emuls="avr1 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6"
+ ;;
++avr32-*-none) targ_emul=avr32elf_ap7000
++ targ_extra_emuls="avr32elf_ap7001 avr32elf_ap7002 avr32elf_ap7200 avr32elf_uc3a0128 avr32elf_uc3a0256 avr32elf_uc3a0512 avr32elf_uc3a0512es avr32elf_uc3a1128 avr32elf_uc3a1256 avr32elf_uc3a1512es avr32elf_uc3a1512 avr32elf_uc3a364 avr32elf_uc3a364s avr32elf_uc3a3128 avr32elf_uc3a3128s avr32elf_uc3a3256 avr32elf_uc3a3256s avr32elf_uc3b064 avr32elf_uc3b0128 avr32elf_uc3b0256es avr32elf_uc3b0256 avr32elf_uc3b0512 avr32elf_uc3b0512revc avr32elf_uc3b164 avr32elf_uc3b1128 avr32elf_uc3b1256es avr32elf_uc3b1256 avr32elf_uc3b1512 avr32elf_uc3b1512revc avr32elf_uc3c0512crevc avr32elf_uc3c1512crevc avr32elf_uc3c2512crevc avr32elf_atuc3l0256 avr32elf_mxt768e avr32elf_uc3l064 avr32elf_uc3l032 avr32elf_uc3l016 avr32elf_uc3l064revb avr32elf_uc3c064c avr32elf_uc3c0128c avr32elf_uc3c0256c avr32elf_uc3c0512c avr32elf_uc3c164c avr32elf_uc3c1128c avr32elf_uc3c1256c avr32elf_uc3c1512c avr32elf_uc3c264c avr32elf_uc3c2128c avr32elf_uc3c2256c avr32elf_uc3c2512c" ;;
++avr32-*-linux*) targ_emul=avr32linux ;;
+ bfin-*-elf) targ_emul=elf32bfin;
+ targ_extra_emuls="elf32bfinfd"
+ targ_extra_libpath=$targ_extra_emuls
+--- /dev/null
++++ b/ld/emulparams/avr32elf.sh
+@@ -0,0 +1,402 @@
++# This script is called from ld/genscript.sh
++# There is a difference on how 'bash' and POSIX handles
++# the '.' (source) command in a script.
++# genscript.sh calls this script with argument ${EMULATION_NAME}
++# but that will fail on POSIX compilant shells like 'sh' or 'dash'
++# therefor I use the variable directly instead of $1
++EMULATION=${EMULATION_NAME}
++SCRIPT_NAME=avr32
++TEMPLATE_NAME=elf32
++EXTRA_EM_FILE=avr32elf
++OUTPUT_FORMAT="elf32-avr32"
++ARCH=avr32
++MAXPAGESIZE=4096
++ENTRY=_start
++EMBEDDED=yes
++NO_SMALL_DATA=yes
++NOP=0xd703d703
++
++DATA_SEGMENT_ALIGN=8
++BSS_ALIGNMENT=8
++
++RO_LMA_REGION="FLASH"
++RO_VMA_REGION="FLASH"
++RW_LMA_REGION="FLASH"
++RW_VMA_REGION="CPUSRAM"
++
++STACK_SIZE=_stack_size
++STACK_ADDR="ORIGIN(CPUSRAM) + LENGTH(CPUSRAM) - ${STACK_SIZE}"
++
++DATA_SEGMENT_END="
++ __heap_start__ = ALIGN(8);
++ . = ${STACK_ADDR};
++ __heap_end__ = .;
++"
++
++case "$EMULATION" in
++avr32elf_ap*)
++ MACHINE=ap
++ INITIAL_READONLY_SECTIONS="
++ .reset : { *(.reset) } >FLASH AT>FLASH
++ . = . & 0x9fffffff;
++"
++ TEXT_START_ADDR=0xa0000000
++ case "$EMULATION" in
++ avr32elf_ap700[0-2])
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai) : ORIGIN = 0x00000000, LENGTH = 64M
++ CPUSRAM (rwxa) : ORIGIN = 0x24000000, LENGTH = 32K
++}
++"
++ ;;
++ avr32elf_ap7200)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai) : ORIGIN = 0x00000000, LENGTH = 64M
++ CPUSRAM (rwxa) : ORIGIN = 0x08000000, LENGTH = 64K
++}
++"
++ ;;
++ esac
++ ;;
++
++avr32elf_mxt768e)
++ MACHINE=uc
++ INITIAL_READONLY_SECTIONS=".reset : { *(.reset) } >FLASH AT>FLASH"
++ TEXT_START_ADDR=0x80000000
++ OTHER_SECTIONS="
++ .userpage : { *(.userpage .userpage.*) } >USERPAGE AT>USERPAGE
++ .factorypage : { *(.factorypage .factorypage.*) } >FACTORYPAGE AT>FACTORYPAGE
++"
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 64K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x3FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++avr32elf_atuc3*)
++ MACHINE=uc
++ INITIAL_READONLY_SECTIONS=".reset : { *(.reset) } >FLASH AT>FLASH"
++ TEXT_START_ADDR=0x80000000
++ OTHER_SECTIONS="
++ .userpage : { *(.userpage .userpage.*) } >USERPAGE AT>USERPAGE
++ .factorypage : { *(.factorypage .factorypage.*) } >FACTORYPAGE AT>FACTORYPAGE
++"
++ case "$EMULATION" in
++ avr32elf_atuc3l0256)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 256K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x3FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++ esac
++ ;;
++
++avr32elf_uc3*)
++ MACHINE=uc
++ INITIAL_READONLY_SECTIONS=".reset : { *(.reset) } >FLASH AT>FLASH"
++ TEXT_START_ADDR=0x80000000
++ OTHER_SECTIONS="
++ .userpage : { *(.userpage .userpage.*) } >USERPAGE AT>USERPAGE
++ .factorypage : { *(.factorypage .factorypage.*) } >FACTORYPAGE AT>FACTORYPAGE
++"
++
++ case "$EMULATION" in
++ avr32elf_uc3c[012]512c)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 512K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++ avr32elf_uc3c[012]256c)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 256K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++ avr32elf_uc3c[012]128c)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 128K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x7FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++ avr32elf_uc3c[012]64c)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 64K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x3FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++ avr32elf_uc3[ac][012]512*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 512K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ ;;
++
++ avr32elf_uc3a[012]256*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 256K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ ;;
++
++ avr32elf_uc3b[01]512revc)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 512K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x17FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ PADDING="
++ .padding : {
++ QUAD(0)
++ QUAD(0)
++ QUAD(0)
++ QUAD(0)
++ } >FLASH AT>FLASH
++"
++ ;;
++
++ avr32elf_uc3b[01]512)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 512K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x17FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ ;;
++
++ avr32elf_uc3b[01]256*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 256K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x7FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ ;;
++
++ avr32elf_uc3[ab][012]128*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 128K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x7FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ ;;
++
++ avr32elf_uc3b[0123]64*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 64K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x3FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ ;;
++
++ avr32elf_uc3a3256*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 256K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ HSBSRAM (wxa!ri) : ORIGIN = 0xFF000000, LENGTH = 64K
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .hsbsram : { *(.hsbsram .hsbsram.*) } >HSBSRAM AT>FLASH :FLASH
++"
++
++ ;;
++
++ avr32elf_uc3a3128*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 128K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ HSBSRAM (wxa!ri) : ORIGIN = 0xFF000000, LENGTH = 64K
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .hsbsram : { *(.hsbsram .hsbsram.*) } >HSBSRAM AT>FLASH :FLASH
++"
++ ;;
++
++ avr32elf_uc3a364*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 64K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0xFFFC
++ HSBSRAM (wxa!ri) : ORIGIN = 0xFF000000, LENGTH = 64K
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .hsbsram : { *(.hsbsram .hsbsram.*) } >HSBSRAM AT>FLASH :FLASH
++"
++ ;;
++
++
++ avr32elf_uc3l[0123]64*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 64K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x3FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++ avr32elf_uc3l[0123]32*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 32K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x3FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++ avr32elf_uc3l[0123]16*)
++ MEMORY="
++MEMORY
++{
++ FLASH (rxai!w) : ORIGIN = 0x80000000, LENGTH = 16K
++ CPUSRAM (wxa!ri) : ORIGIN = 0x00000004, LENGTH = 0x1FFC
++ USERPAGE : ORIGIN = 0x80800000, LENGTH = 512
++ FACTORYPAGE : ORIGIN = 0x80800200, LENGTH = 512
++ FLASHVAULT_FLASH_SIZE (r) : ORIGIN = 0x80800400, LENGTH = 8
++ FLASHVAULT_RAM_SIZE (r) : ORIGIN = 0x80800408, LENGTH = 8
++}
++"
++ OTHER_SECTIONS="${OTHER_SECTIONS}
++ .flashvault_flash_size : { KEEP(*(.flashvault_flash_size .flashvault_flash_size.*)) } > FLASHVAULT_FLASH_SIZE
++ .flashvault_ram_size : { KEEP(*(.flashvault_ram_size .flashvault_ram_size.*)) } > FLASHVAULT_RAM_SIZE
++"
++ ;;
++
++
++ esac
++ ;;
++
++esac
+--- /dev/null
++++ b/ld/emulparams/avr32linux.sh
+@@ -0,0 +1,14 @@
++ARCH=avr32
++SCRIPT_NAME=elf
++TEMPLATE_NAME=elf32
++EXTRA_EM_FILE=avr32elf
++OUTPUT_FORMAT="elf32-avr32"
++GENERATE_SHLIB_SCRIPT=yes
++MAXPAGESIZE=0x1000
++TEXT_START_ADDR=0x00001000
++NOP=0xd703d703
++
++# This appears to place the GOT before the data section, which is
++# essential for uClinux. We don't use those .s* sections on AVR32
++# anyway, so it shouldn't hurt for regular Linux either...
++NO_SMALL_DATA=yes
+--- /dev/null
++++ b/ld/emultempl/avr32elf.em
+@@ -0,0 +1,162 @@
++# This shell script emits a C file. -*- C -*-
++# Copyright (C) 2007,2008,2009 Atmel Corporation
++#
++# This file is part of GLD, the Gnu Linker.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
++#
++
++# This file is sourced from elf32.em, and defines extra avr32-elf
++# specific routines.
++#
++
++# Generate linker script for writable rodata
++LD_FLAG=rodata-writable
++DATA_ALIGNMENT=${DATA_ALIGNMENT_}
++RELOCATING=" "
++WRITABLE_RODATA=" "
++( echo "/* Linker script for writable rodata */"
++ . ${CUSTOMIZER_SCRIPT} ${EMULATION_NAME}
++ . ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
++) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xwr
++
++
++cat >> e${EMULATION_NAME}.c <<EOF
++
++#include "libbfd.h"
++#include "elf32-avr32.h"
++
++/* Whether to allow direct references (sub or mov) to SEC_DATA and
++ !SEC_CONTENTS sections when optimizing. Not enabled by default
++ since it might cause link errors. */
++static int direct_data_refs = 0;
++
++static void avr32_elf_after_open (void)
++{
++ bfd_elf32_avr32_set_options (&link_info, direct_data_refs);
++ gld${EMULATION_NAME}_after_open ();
++}
++
++static int rodata_writable = 0;
++
++static int stack_size = 0x1000;
++
++static void avr32_elf_set_symbols (void)
++{
++ /* Glue the assignments into the abs section. */
++ lang_statement_list_type *save = stat_ptr;
++
++
++ stat_ptr = &(abs_output_section->children);
++
++ lang_add_assignment (exp_assop ('=', "_stack_size",
++ exp_intop (stack_size)));
++
++ stat_ptr = save;
++}
++
++static char * gld${EMULATION_NAME}_get_script (int *isfile);
++
++static char * avr32_elf_get_script (int *isfile)
++{
++ if ( rodata_writable )
++ {
++EOF
++if test -n "$COMPILE_IN"
++then
++# Scripts compiled in.
++
++# sed commands to quote an ld script as a C string.
++sc="-f stringify.sed"
++
++cat >>e${EMULATION_NAME}.c <<EOF
++ *isfile = 0;
++ return
++EOF
++sed $sc ldscripts/${EMULATION_NAME}.xwr >> e${EMULATION_NAME}.c
++echo ';' >> e${EMULATION_NAME}.c
++else
++# Scripts read from the filesystem.
++
++cat >>e${EMULATION_NAME}.c <<EOF
++ *isfile = 1;
++ return "ldscripts/${EMULATION_NAME}.xwr";
++EOF
++fi
++
++cat >>e${EMULATION_NAME}.c <<EOF
++ }
++ return gld${EMULATION_NAME}_get_script (isfile);
++}
++
++
++EOF
++
++# Define some shell vars to insert bits of code into the standard elf
++# parse_args and list_options functions.
++#
++PARSE_AND_LIST_PROLOGUE='
++#define OPTION_DIRECT_DATA 300
++#define OPTION_NO_DIRECT_DATA 301
++#define OPTION_RODATA_WRITABLE 302
++#define OPTION_NO_RODATA_WRITABLE 303
++#define OPTION_STACK 304
++'
++
++PARSE_AND_LIST_LONGOPTS='
++ { "direct-data", no_argument, NULL, OPTION_DIRECT_DATA },
++ { "no-direct-data", no_argument, NULL, OPTION_NO_DIRECT_DATA },
++ { "rodata-writable", no_argument, NULL, OPTION_RODATA_WRITABLE },
++ { "no-rodata-writable", no_argument, NULL, OPTION_NO_RODATA_WRITABLE },
++ { "stack", required_argument, NULL, OPTION_STACK },
++'
++
++PARSE_AND_LIST_OPTIONS='
++ fprintf (file, _(" --direct-data\t\tAllow direct data references when optimizing\n"));
++ fprintf (file, _(" --no-direct-data\tDo not allow direct data references when optimizing\n"));
++ fprintf (file, _(" --rodata-writable\tPut read-only data in writable data section\n"));
++ fprintf (file, _(" --no-rodata-writable\tDo not put read-only data in writable data section\n"));
++ fprintf (file, _(" --stack <size>\tSet the initial size of the stack\n"));
++'
++
++PARSE_AND_LIST_ARGS_CASES='
++ case OPTION_DIRECT_DATA:
++ direct_data_refs = 1;
++ break;
++ case OPTION_NO_DIRECT_DATA:
++ direct_data_refs = 0;
++ break;
++ case OPTION_RODATA_WRITABLE:
++ rodata_writable = 1;
++ break;
++ case OPTION_NO_RODATA_WRITABLE:
++ rodata_writable = 0;
++ break;
++ case OPTION_STACK:
++ {
++ char *end;
++ stack_size = strtoul (optarg, &end, 0);
++ if (end == optarg)
++ einfo (_("%P%F: invalid hex number for parameter '%s'\n"), optarg);
++ optarg = end;
++ break;
++ }
++'
++
++# Replace some of the standard ELF functions with our own versions.
++#
++LDEMUL_AFTER_OPEN=avr32_elf_after_open
++LDEMUL_GET_SCRIPT=avr32_elf_get_script
++LDEMUL_SET_SYMBOLS=avr32_elf_set_symbols
+--- a/ld/Makefile.am
++++ b/ld/Makefile.am
+@@ -148,6 +148,58 @@ ALL_EMULATIONS = \
+ eavr5.o \
+ eavr51.o \
+ eavr6.o \
++ eavr32elf_ap7000.o \
++ eavr32elf_ap7001.o \
++ eavr32elf_ap7002.o \
++ eavr32elf_ap7200.o \
++ eavr32elf_uc3a0128.o \
++ eavr32elf_uc3a0256.o \
++ eavr32elf_uc3a0512.o \
++ eavr32elf_uc3a0512es.o \
++ eavr32elf_uc3a1128.o \
++ eavr32elf_uc3a1256.o \
++ eavr32elf_uc3a1512es.o \
++ eavr32elf_uc3a1512.o \
++ eavr32elf_uc3a364.o \
++ eavr32elf_uc3a364s.o \
++ eavr32elf_uc3a3128.o \
++ eavr32elf_uc3a3128s.o \
++ eavr32elf_uc3a3256.o \
++ eavr32elf_uc3a3256s.o \
++ eavr32elf_uc3b064.o \
++ eavr32elf_uc3b0128.o \
++ eavr32elf_uc3b0256es.o \
++ eavr32elf_uc3b0256.o \
++ eavr32elf_uc3b0512.o \
++ eavr32elf_uc3b0512revc.o \
++ eavr32elf_uc3b164.o \
++ eavr32elf_uc3b1128.o \
++ eavr32elf_uc3b1256es.o \
++ eavr32elf_uc3b1256.o \
++ eavr32elf_uc3b1512.o \
++ eavr32elf_uc3b1512revc.o \
++ eavr32elf_uc3c0512crevc.o \
++ eavr32elf_uc3c1512crevc.o \
++ eavr32elf_uc3c2512crevc.o \
++ eavr32elf_atuc3l0256.o \
++ eavr32elf_mxt768e.o \
++ eavr32elf_uc3l064.o \
++ eavr32elf_uc3l032.o \
++ eavr32elf_uc3l016.o \
++ eavr32elf_uc3l064revb.o \
++ eavr32elf_uc3c064c.o \
++ eavr32elf_uc3c0128c.o \
++ eavr32elf_uc3c0256c.o \
++ eavr32elf_uc3c0512c.o \
++ eavr32elf_uc3c164c.o \
++ eavr32elf_uc3c1128c.o \
++ eavr32elf_uc3c1256c.o \
++ eavr32elf_uc3c1512c.o \
++ eavr32elf_uc3c264c.o \
++ eavr32elf_uc3c2128c.o \
++ eavr32elf_uc3c2256c.o \
++ eavr32elf_uc3c2512c.o \
++ eavr32linux.o \
+ ecoff_i860.o \
+ ecoff_sparc.o \
+ eelf32_spu.o \
+@@ -727,6 +779,214 @@ eavr6.c: $(srcdir)/emulparams/avr6.sh $(
+ $(ELF_DEPS) $(srcdir)/scripttempl/avr.sc \
+ ${GEN_DEPENDS}
+ ${GENSCRIPTS} avr6 "$(tdir_avr2)"
++eavr32elf_ap7000.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7000 "$(tdir_avr32)" avr32elf
++eavr32elf_ap7001.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7001 "$(tdir_avr32)" avr32elf
++eavr32elf_ap7002.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7002 "$(tdir_avr32)" avr32elf
++eavr32elf_ap7200.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7200 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0512es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0512es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1512es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1512es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a364.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a364 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a364s.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a364s "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3128s.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3128s "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3256s.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3256s "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b064.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b064 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0256es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0256es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0512revc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0512revc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b164.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b164 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1256es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1256es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1512revc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1512revc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0512crevc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0512crevc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1512crevc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1512crevc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2512crevc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2512crevc "$(tdir_avr32)" avr32elf
++eavr32elf_atuc3l0256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_atuc3l0256 "$(tdir_avr32)" avr32elf
++eavr32elf_mxt768e.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_mxt768e "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l064.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l064 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l032.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l032 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l016.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l016 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l064revb.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l064revb "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c064c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c064c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0128c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0128c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0256c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0256c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0512c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0512c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c164c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c164c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1128c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1128c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1256c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1256c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1512c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1512c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c264c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c264c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2128c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2128c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2256c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2256c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2512c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2512c "$(tdir_avr32)" avr32elf
++eavr32linux.c: $(srcdir)/emulparams/avr32linux.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32linux "$(tdir_avr32)"
+ ecoff_i860.c: $(srcdir)/emulparams/coff_i860.sh \
+ $(srcdir)/emultempl/generic.em $(srcdir)/scripttempl/i860coff.sc ${GEN_DEPENDS}
+ ${GENSCRIPTS} coff_i860 "$(tdir_coff_i860)"
+@@ -1964,7 +2224,9 @@ install-exec-local: ld-new$(EXEEXT)
+ || $(LIBTOOL) --mode=install $(INSTALL_PROGRAM) ld-new$(EXEEXT) $(DESTDIR)$(tooldir)/bin/ld$(EXEEXT); \
+ fi
+
+-install-data-local:
++# We want install to imply install-info as per GNU standards, despite the
++# cygnus option.
++install-data-local: install-info
+ $(mkinstalldirs) $(DESTDIR)$(scriptdir)/ldscripts
+ for f in ldscripts/*; do \
+ $(INSTALL_DATA) $$f $(DESTDIR)$(scriptdir)/$$f ; \
+--- /dev/null
++++ b/ld/scripttempl/avr32.sc
+@@ -0,0 +1,459 @@
++#
++# Unusual variables checked by this code:
++# NOP - four byte opcode for no-op (defaults to 0)
++# NO_SMALL_DATA - no .sbss/.sbss2/.sdata/.sdata2 sections if not
++# empty.
++# SMALL_DATA_CTOR - .ctors contains small data.
++# SMALL_DATA_DTOR - .dtors contains small data.
++# DATA_ADDR - if end-of-text-plus-one-page isn't right for data start
++# INITIAL_READONLY_SECTIONS - at start of text segment
++# OTHER_READONLY_SECTIONS - other than .text .init .rodata ...
++# (e.g., .PARISC.milli)
++# OTHER_TEXT_SECTIONS - these get put in .text when relocating
++# OTHER_READWRITE_SECTIONS - other than .data .bss .ctors .sdata ...
++# (e.g., .PARISC.global)
++# OTHER_RELRO_SECTIONS - other than .data.rel.ro ...
++# (e.g. PPC32 .fixup, .got[12])
++# OTHER_BSS_SECTIONS - other than .bss .sbss ...
++# OTHER_SECTIONS - at the end
++# EXECUTABLE_SYMBOLS - symbols that must be defined for an
++# executable (e.g., _DYNAMIC_LINK)
++# TEXT_START_ADDR - the first byte of the text segment, after any
++# headers.
++# TEXT_BASE_ADDRESS - the first byte of the text segment.
++# TEXT_START_SYMBOLS - symbols that appear at the start of the
++# .text section.
++# DATA_START_SYMBOLS - symbols that appear at the start of the
++# .data section.
++# OTHER_GOT_SYMBOLS - symbols defined just before .got.
++# OTHER_GOT_SECTIONS - sections just after .got.
++# OTHER_SDATA_SECTIONS - sections just after .sdata.
++# OTHER_BSS_SYMBOLS - symbols that appear at the start of the
++# .bss section besides __bss_start.
++# DATA_PLT - .plt should be in data segment, not text segment.
++# PLT_BEFORE_GOT - .plt just before .got when .plt is in data segment.
++# BSS_PLT - .plt should be in bss segment
++# TEXT_DYNAMIC - .dynamic in text segment, not data segment.
++# EMBEDDED - whether this is for an embedded system.
++# SHLIB_TEXT_START_ADDR - if set, add to SIZEOF_HEADERS to set
++# start address of shared library.
++# INPUT_FILES - INPUT command of files to always include
++# WRITABLE_RODATA - if set, the .rodata section should be writable
++# INIT_START, INIT_END - statements just before and just after
++# combination of .init sections.
++# FINI_START, FINI_END - statements just before and just after
++# combination of .fini sections.
++# STACK_ADDR - start of a .stack section.
++# OTHER_END_SYMBOLS - symbols to place right at the end of the script.
++# SEPARATE_GOTPLT - if set, .got.plt should be separate output section,
++# so that .got can be in the RELRO area. It should be set to
++# the number of bytes in the beginning of .got.plt which can be
++# in the RELRO area as well.
++#
++# When adding sections, do note that the names of some sections are used
++# when specifying the start address of the next.
++#
++
++# Many sections come in three flavours. There is the 'real' section,
++# like ".data". Then there are the per-procedure or per-variable
++# sections, generated by -ffunction-sections and -fdata-sections in GCC,
++# and useful for --gc-sections, which for a variable "foo" might be
++# ".data.foo". Then there are the linkonce sections, for which the linker
++# eliminates duplicates, which are named like ".gnu.linkonce.d.foo".
++# The exact correspondences are:
++#
++# Section Linkonce section
++# .text .gnu.linkonce.t.foo
++# .rodata .gnu.linkonce.r.foo
++# .data .gnu.linkonce.d.foo
++# .bss .gnu.linkonce.b.foo
++# .sdata .gnu.linkonce.s.foo
++# .sbss .gnu.linkonce.sb.foo
++# .sdata2 .gnu.linkonce.s2.foo
++# .sbss2 .gnu.linkonce.sb2.foo
++# .debug_info .gnu.linkonce.wi.foo
++# .tdata .gnu.linkonce.td.foo
++# .tbss .gnu.linkonce.tb.foo
++#
++# Each of these can also have corresponding .rel.* and .rela.* sections.
++
++test -z "$ENTRY" && ENTRY=_start
++test -z "${BIG_OUTPUT_FORMAT}" && BIG_OUTPUT_FORMAT=${OUTPUT_FORMAT}
++test -z "${LITTLE_OUTPUT_FORMAT}" && LITTLE_OUTPUT_FORMAT=${OUTPUT_FORMAT}
++if [ -z "$MACHINE" ]; then OUTPUT_ARCH=${ARCH}; else OUTPUT_ARCH=${ARCH}:${MACHINE}; fi
++test -z "${ELFSIZE}" && ELFSIZE=32
++test -z "${ALIGNMENT}" && ALIGNMENT="${ELFSIZE} / 8"
++test "$LD_FLAG" = "N" && DATA_ADDR=.
++test -n "$CREATE_SHLIB$CREATE_PIE" && test -n "$SHLIB_DATA_ADDR" && COMMONPAGESIZE=""
++test -z "$CREATE_SHLIB$CREATE_PIE" && test -n "$DATA_ADDR" && COMMONPAGESIZE=""
++test -n "$RELRO_NOW" && unset SEPARATE_GOTPLT
++if test -n "$RELOCATING"; then
++ RO_REGION="${RO_VMA_REGION+ >}${RO_VMA_REGION}${RO_LMA_REGION+ AT>}${RO_LMA_REGION}"
++ RW_REGION="${RW_VMA_REGION+ >}${RW_VMA_REGION}${RW_LMA_REGION+ AT>}${RW_LMA_REGION}"
++ RW_BSS_REGION="${RW_VMA_REGION+ >}${RW_VMA_REGION}"
++else
++ RO_REGION=""
++ RW_REGION=""
++ RW_BSS_REGION=""
++fi
++INTERP=".interp ${RELOCATING-0} : { *(.interp) }${RO_REGION}"
++PLT=".plt ${RELOCATING-0} : { *(.plt) }"
++if test -z "$GOT"; then
++ if test -z "$SEPARATE_GOTPLT"; then
++ GOT=".got ${RELOCATING-0} : { *(.got.plt) *(.got) }"
++ else
++ GOT=".got ${RELOCATING-0} : { *(.got) }"
++ GOTPLT="${RELOCATING+${DATA_SEGMENT_RELRO_GOTPLT_END}}
++ .got.plt ${RELOCATING-0} : { *(.got.plt) }"
++ fi
++fi
++DALIGN=".dalign : { . = ALIGN(${DATA_SEGMENT_ALIGN}); PROVIDE(_data_lma = .); }${RO_REGION}"
++BALIGN=".balign : { . = ALIGN(${BSS_ALIGNMENT}); _edata = .; }${RW_REGION}"
++DYNAMIC=".dynamic ${RELOCATING-0} : { *(.dynamic) }"
++RODATA=".rodata ${RELOCATING-0} : { *(.rodata${RELOCATING+ .rodata.* .gnu.linkonce.r.*}) }"
++DATARELRO=".data.rel.ro : { *(.data.rel.ro.local) *(.data.rel.ro*) }${RW_REGION}"
++STACKNOTE="/DISCARD/ : { *(.note.GNU-stack) }"
++if test -z "${NO_SMALL_DATA}"; then
++ SBSS=".sbss ${RELOCATING-0} :
++ {
++ ${RELOCATING+PROVIDE (__sbss_start = .);}
++ ${RELOCATING+PROVIDE (___sbss_start = .);}
++ ${CREATE_SHLIB+*(.sbss2 .sbss2.* .gnu.linkonce.sb2.*)}
++ *(.dynsbss)
++ *(.sbss${RELOCATING+ .sbss.* .gnu.linkonce.sb.*})
++ *(.scommon)
++ ${RELOCATING+PROVIDE (__sbss_end = .);}
++ ${RELOCATING+PROVIDE (___sbss_end = .);}
++ }${RW_BSS_REGION}"
++ SBSS2=".sbss2 ${RELOCATING-0} : { *(.sbss2${RELOCATING+ .sbss2.* .gnu.linkonce.sb2.*}) }${RW_REGION}"
++ SDATA="/* We want the small data sections together, so single-instruction offsets
++ can access them all, and initialized data all before uninitialized, so
++ we can shorten the on-disk segment size. */
++ .sdata ${RELOCATING-0} :
++ {
++ ${RELOCATING+${SDATA_START_SYMBOLS}}
++ ${CREATE_SHLIB+*(.sdata2 .sdata2.* .gnu.linkonce.s2.*)}
++ *(.sdata${RELOCATING+ .sdata.* .gnu.linkonce.s.*})
++ }${RW_REGION}"
++ SDATA2=".sdata2 ${RELOCATING-0} : { *(.sdata2${RELOCATING+ .sdata2.* .gnu.linkonce.s2.*}) }${RW_REGION}"
++ REL_SDATA=".rel.sdata ${RELOCATING-0} : { *(.rel.sdata${RELOCATING+ .rel.sdata.* .rel.gnu.linkonce.s.*}) }${RO_REGION}
++ .rela.sdata ${RELOCATING-0} : { *(.rela.sdata${RELOCATING+ .rela.sdata.* .rela.gnu.linkonce.s.*}) }"
++ REL_SBSS=".rel.sbss ${RELOCATING-0} : { *(.rel.sbss${RELOCATING+ .rel.sbss.* .rel.gnu.linkonce.sb.*}) }${RO_REGION}
++ .rela.sbss ${RELOCATING-0} : { *(.rela.sbss${RELOCATING+ .rela.sbss.* .rela.gnu.linkonce.sb.*}) }${RO_REGION}"
++ REL_SDATA2=".rel.sdata2 ${RELOCATING-0} : { *(.rel.sdata2${RELOCATING+ .rel.sdata2.* .rel.gnu.linkonce.s2.*}) }${RO_REGION}
++ .rela.sdata2 ${RELOCATING-0} : { *(.rela.sdata2${RELOCATING+ .rela.sdata2.* .rela.gnu.linkonce.s2.*}) }${RO_REGION}"
++ REL_SBSS2=".rel.sbss2 ${RELOCATING-0} : { *(.rel.sbss2${RELOCATING+ .rel.sbss2.* .rel.gnu.linkonce.sb2.*}) }${RO_REGION}
++ .rela.sbss2 ${RELOCATING-0} : { *(.rela.sbss2${RELOCATING+ .rela.sbss2.* .rela.gnu.linkonce.sb2.*}) }${RO_REGION}"
++else
++ NO_SMALL_DATA=" "
++fi
++test -n "$SEPARATE_GOTPLT" && SEPARATE_GOTPLT=" "
++CTOR=".ctors ${CONSTRUCTING-0} :
++ {
++ ${CONSTRUCTING+${CTOR_START}}
++ /* gcc uses crtbegin.o to find the start of
++ the constructors, so we make sure it is
++ first. Because this is a wildcard, it
++ doesn't matter if the user does not
++ actually link against crtbegin.o; the
++ linker won't look for a file to match a
++ wildcard. The wildcard also means that it
++ doesn't matter which directory crtbegin.o
++ is in. */
++
++ KEEP (*crtbegin*.o(.ctors))
++
++ /* We don't want to include the .ctor section
++ from the crtend.o file until after the sorted ctors.
++ The .ctor section from the crtend file contains the
++ end of ctors marker and it must be last */
++
++ KEEP (*(EXCLUDE_FILE (*crtend*.o $OTHER_EXCLUDE_FILES) .ctors))
++ KEEP (*(SORT(.ctors.*)))
++ KEEP (*(.ctors))
++ ${CONSTRUCTING+${CTOR_END}}
++ }"
++DTOR=".dtors ${CONSTRUCTING-0} :
++ {
++ ${CONSTRUCTING+${DTOR_START}}
++ KEEP (*crtbegin*.o(.dtors))
++ KEEP (*(EXCLUDE_FILE (*crtend*.o $OTHER_EXCLUDE_FILES) .dtors))
++ KEEP (*(SORT(.dtors.*)))
++ KEEP (*(.dtors))
++ ${CONSTRUCTING+${DTOR_END}}
++ }"
++STACK=".stack ${RELOCATING-0}${RELOCATING+${STACK_ADDR}} :
++ {
++ ${RELOCATING+_stack = .;}
++ *(.stack)
++ ${RELOCATING+${STACK_SIZE+. = ${STACK_SIZE};}}
++ ${RELOCATING+_estack = .;}
++ }${RW_BSS_REGION}"
++
++# if this is for an embedded system, don't add SIZEOF_HEADERS.
++if [ -z "$EMBEDDED" ]; then
++ test -z "${TEXT_BASE_ADDRESS}" && TEXT_BASE_ADDRESS="${TEXT_START_ADDR} + SIZEOF_HEADERS"
++else
++ test -z "${TEXT_BASE_ADDRESS}" && TEXT_BASE_ADDRESS="${TEXT_START_ADDR}"
++fi
++
++cat <<EOF
++OUTPUT_FORMAT("${OUTPUT_FORMAT}", "${BIG_OUTPUT_FORMAT}",
++ "${LITTLE_OUTPUT_FORMAT}")
++OUTPUT_ARCH(${OUTPUT_ARCH})
++ENTRY(${ENTRY})
++
++${RELOCATING+${LIB_SEARCH_DIRS}}
++${RELOCATING+/* Do we need any of these for elf?
++ __DYNAMIC = 0; ${STACKZERO+${STACKZERO}} ${SHLIB_PATH+${SHLIB_PATH}} */}
++${RELOCATING+${EXECUTABLE_SYMBOLS}}
++${RELOCATING+${INPUT_FILES}}
++${RELOCATING- /* For some reason, the Solaris linker makes bad executables
++ if gld -r is used and the intermediate file has sections starting
++ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
++ bug. But for now assigning the zero vmas works. */}
++
++${RELOCATING+${MEMORY}}
++
++SECTIONS
++{
++ /* Read-only sections, merged into text segment: */
++ ${CREATE_SHLIB-${CREATE_PIE-${RELOCATING+PROVIDE (__executable_start = ${TEXT_START_ADDR}); . = ${TEXT_BASE_ADDRESS};}}}
++ ${PADDING}
++ ${CREATE_SHLIB+${RELOCATING+. = ${SHLIB_TEXT_START_ADDR:-0} + SIZEOF_HEADERS;}}
++ ${CREATE_PIE+${RELOCATING+. = ${SHLIB_TEXT_START_ADDR:-0} + SIZEOF_HEADERS;}}
++ ${CREATE_SHLIB-${INTERP}}
++ ${INITIAL_READONLY_SECTIONS}
++ ${TEXT_DYNAMIC+${DYNAMIC}${RO_REGION}}
++ .hash ${RELOCATING-0} : { *(.hash) }${RO_REGION}
++ .dynsym ${RELOCATING-0} : { *(.dynsym) }${RO_REGION}
++ .dynstr ${RELOCATING-0} : { *(.dynstr) }${RO_REGION}
++ .gnu.version ${RELOCATING-0} : { *(.gnu.version) }${RO_REGION}
++ .gnu.version_d ${RELOCATING-0}: { *(.gnu.version_d) }${RO_REGION}
++ .gnu.version_r ${RELOCATING-0}: { *(.gnu.version_r) }${RO_REGION}
++
++EOF
++if [ "x$COMBRELOC" = x ]; then
++ COMBRELOCCAT=cat
++else
++ COMBRELOCCAT="cat > $COMBRELOC"
++fi
++eval $COMBRELOCCAT <<EOF
++ .rel.init ${RELOCATING-0} : { *(.rel.init) }${RO_REGION}
++ .rela.init ${RELOCATING-0} : { *(.rela.init) }${RO_REGION}
++ .rel.text ${RELOCATING-0} : { *(.rel.text${RELOCATING+ .rel.text.* .rel.gnu.linkonce.t.*}) }${RO_REGION}
++ .rela.text ${RELOCATING-0} : { *(.rela.text${RELOCATING+ .rela.text.* .rela.gnu.linkonce.t.*}) }${RO_REGION}
++ .rel.fini ${RELOCATING-0} : { *(.rel.fini) }${RO_REGION}
++ .rela.fini ${RELOCATING-0} : { *(.rela.fini) }${RO_REGION}
++ .rel.rodata ${RELOCATING-0} : { *(.rel.rodata${RELOCATING+ .rel.rodata.* .rel.gnu.linkonce.r.*}) }${RO_REGION}
++ .rela.rodata ${RELOCATING-0} : { *(.rela.rodata${RELOCATING+ .rela.rodata.* .rela.gnu.linkonce.r.*}) }${RO_REGION}
++ ${OTHER_READONLY_RELOC_SECTIONS}
++ .rel.data.rel.ro ${RELOCATING-0} : { *(.rel.data.rel.ro${RELOCATING+*}) }${RO_REGION}
++ .rela.data.rel.ro ${RELOCATING-0} : { *(.rel.data.rel.ro${RELOCATING+*}) }${RO_REGION}
++ .rel.data ${RELOCATING-0} : { *(.rel.data${RELOCATING+ .rel.data.* .rel.gnu.linkonce.d.*}) }${RO_REGION}
++ .rela.data ${RELOCATING-0} : { *(.rela.data${RELOCATING+ .rela.data.* .rela.gnu.linkonce.d.*}) }${RO_REGION}
++ .rel.tdata ${RELOCATING-0} : { *(.rel.tdata${RELOCATING+ .rel.tdata.* .rel.gnu.linkonce.td.*}) }${RO_REGION}
++ .rela.tdata ${RELOCATING-0} : { *(.rela.tdata${RELOCATING+ .rela.tdata.* .rela.gnu.linkonce.td.*}) }${RO_REGION}
++ .rel.tbss ${RELOCATING-0} : { *(.rel.tbss${RELOCATING+ .rel.tbss.* .rel.gnu.linkonce.tb.*}) }${RO_REGION}
++ .rela.tbss ${RELOCATING-0} : { *(.rela.tbss${RELOCATING+ .rela.tbss.* .rela.gnu.linkonce.tb.*}) }${RO_REGION}
++ .rel.ctors ${RELOCATING-0} : { *(.rel.ctors) }${RO_REGION}
++ .rela.ctors ${RELOCATING-0} : { *(.rela.ctors) }${RO_REGION}
++ .rel.dtors ${RELOCATING-0} : { *(.rel.dtors) }${RO_REGION}
++ .rela.dtors ${RELOCATING-0} : { *(.rela.dtors) }${RO_REGION}
++ .rel.got ${RELOCATING-0} : { *(.rel.got) }${RO_REGION}
++ .rela.got ${RELOCATING-0} : { *(.rela.got) }${RO_REGION}
++ ${OTHER_GOT_RELOC_SECTIONS}
++ ${REL_SDATA}
++ ${REL_SBSS}
++ ${REL_SDATA2}
++ ${REL_SBSS2}
++ .rel.bss ${RELOCATING-0} : { *(.rel.bss${RELOCATING+ .rel.bss.* .rel.gnu.linkonce.b.*}) }${RO_REGION}
++ .rela.bss ${RELOCATING-0} : { *(.rela.bss${RELOCATING+ .rela.bss.* .rela.gnu.linkonce.b.*}) }${RO_REGION}
++EOF
++if [ -n "$COMBRELOC" ]; then
++cat <<EOF
++ .rel.dyn ${RELOCATING-0} :
++ {
++EOF
++sed -e '/^[ ]*[{}][ ]*$/d;/:[ ]*$/d;/\.rela\./d;s/^.*: { *\(.*\)}$/ \1/' $COMBRELOC
++cat <<EOF
++ }${RO_REGION}
++ .rela.dyn ${RELOCATING-0} :
++ {
++EOF
++sed -e '/^[ ]*[{}][ ]*$/d;/:[ ]*$/d;/\.rel\./d;s/^.*: { *\(.*\)}/ \1/' $COMBRELOC
++cat <<EOF
++ }${RO_REGION}
++EOF
++fi
++cat <<EOF
++ .rel.plt ${RELOCATING-0} : { *(.rel.plt) }${RO_REGION}
++ .rela.plt ${RELOCATING-0} : { *(.rela.plt) }${RO_REGION}
++ ${OTHER_PLT_RELOC_SECTIONS}
++
++ .init ${RELOCATING-0} :
++ {
++ ${RELOCATING+${INIT_START}}
++ KEEP (*(.init))
++ ${RELOCATING+${INIT_END}}
++ }${RO_REGION} =${NOP-0}
++
++ ${DATA_PLT-${BSS_PLT-${PLT}${RO_REGION}}}
++ .text ${RELOCATING-0} :
++ {
++ ${RELOCATING+${TEXT_START_SYMBOLS}}
++ *(.text .stub${RELOCATING+ .text.* .gnu.linkonce.t.*})
++ KEEP (*(.text.*personality*))
++ /* .gnu.warning sections are handled specially by elf32.em. */
++ *(.gnu.warning)
++ ${RELOCATING+${OTHER_TEXT_SECTIONS}}
++ }${RO_REGION} =${NOP-0}
++ .fini ${RELOCATING-0} :
++ {
++ ${RELOCATING+${FINI_START}}
++ KEEP (*(.fini))
++ ${RELOCATING+${FINI_END}}
++ }${RO_REGION} =${NOP-0}
++ ${RELOCATING+PROVIDE (__etext = .);}
++ ${RELOCATING+PROVIDE (_etext = .);}
++ ${RELOCATING+PROVIDE (etext = .);}
++ ${WRITABLE_RODATA-${RODATA}${RO_REGION}}
++ .rodata1 ${RELOCATING-0} : { *(.rodata1) }${RO_REGION}
++ ${CREATE_SHLIB-${SDATA2}}
++ ${CREATE_SHLIB-${SBSS2}}
++ ${OTHER_READONLY_SECTIONS}
++ .eh_frame_hdr : { *(.eh_frame_hdr) }${RO_REGION}
++ .eh_frame ${RELOCATING-0} : ONLY_IF_RO { KEEP (*(.eh_frame)) }${RO_REGION}
++ .gcc_except_table ${RELOCATING-0} : ONLY_IF_RO { KEEP (*(.gcc_except_table)) *(.gcc_except_table.*) }${RO_REGION}
++
++ ${RELOCATING+${DALIGN}}
++ ${RELOCATING+PROVIDE (_data = ORIGIN(${RW_VMA_REGION}));}
++ . = ORIGIN(${RW_VMA_REGION});
++ /* Exception handling */
++ .eh_frame ${RELOCATING-0} : ONLY_IF_RW { KEEP (*(.eh_frame)) }${RW_REGION}
++ .gcc_except_table ${RELOCATING-0} : ONLY_IF_RW { KEEP (*(.gcc_except_table)) *(.gcc_except_table.*) }${RW_REGION}
++
++ /* Thread Local Storage sections */
++ .tdata ${RELOCATING-0} : { *(.tdata${RELOCATING+ .tdata.* .gnu.linkonce.td.*}) }${RW_REGION}
++ .tbss ${RELOCATING-0} : { *(.tbss${RELOCATING+ .tbss.* .gnu.linkonce.tb.*})${RELOCATING+ *(.tcommon)} }${RW_BSS_REGION}
++
++ /* Ensure the __preinit_array_start label is properly aligned. We
++ could instead move the label definition inside the section, but
++ the linker would then create the section even if it turns out to
++ be empty, which isn't pretty. */
++ ${RELOCATING+${CREATE_SHLIB-PROVIDE (__preinit_array_start = ALIGN(${ALIGNMENT}));}}
++ .preinit_array ${RELOCATING-0} : { KEEP (*(.preinit_array)) }${RW_REGION}
++ ${RELOCATING+${CREATE_SHLIB-PROVIDE (__preinit_array_end = .);}}
++
++ ${RELOCATING+${CREATE_SHLIB-PROVIDE (__init_array_start = .);}}
++ .init_array ${RELOCATING-0} : { KEEP (*(.init_array)) }${RW_REGION}
++ ${RELOCATING+${CREATE_SHLIB-PROVIDE (__init_array_end = .);}}
++
++ ${RELOCATING+${CREATE_SHLIB-PROVIDE (__fini_array_start = .);}}
++ .fini_array ${RELOCATING-0} : { KEEP (*(.fini_array)) }${RW_REGION}
++ ${RELOCATING+${CREATE_SHLIB-PROVIDE (__fini_array_end = .);}}
++
++ ${SMALL_DATA_CTOR-${RELOCATING+${CTOR}${RW_REGION}}}
++ ${SMALL_DATA_DTOR-${RELOCATING+${DTOR}${RW_REGION}}}
++ .jcr ${RELOCATING-0} : { KEEP (*(.jcr)) }${RW_REGION}
++
++ ${RELOCATING+${DATARELRO}}
++ ${OTHER_RELRO_SECTIONS}
++ ${TEXT_DYNAMIC-${DYNAMIC}${RW_REGION}}
++ ${NO_SMALL_DATA+${RELRO_NOW+${GOT}${RW_REGION}}}
++ ${NO_SMALL_DATA+${RELRO_NOW-${SEPARATE_GOTPLT+${GOT}${RW_REGION}}}}
++ ${NO_SMALL_DATA+${RELRO_NOW-${SEPARATE_GOTPLT+${GOTPLT}${RW_REGION}}}}
++ ${RELOCATING+${DATA_SEGMENT_RELRO_END}}
++ ${NO_SMALL_DATA+${RELRO_NOW-${SEPARATE_GOTPLT-${GOT}${RW_REGION}}}}
++
++ ${DATA_PLT+${PLT_BEFORE_GOT-${PLT}${RW_REGION}}}
++
++ .data ${RELOCATING-0} :
++ {
++ ${RELOCATING+${DATA_START_SYMBOLS}}
++ *(.data${RELOCATING+ .data.* .gnu.linkonce.d.*})
++ KEEP (*(.gnu.linkonce.d.*personality*))
++ ${CONSTRUCTING+SORT(CONSTRUCTORS)}
++ }${RW_REGION}
++ .data1 ${RELOCATING-0} : { *(.data1) }${RW_REGION}
++ ${WRITABLE_RODATA+${RODATA}${RW_REGION}}
++ ${OTHER_READWRITE_SECTIONS}
++ ${SMALL_DATA_CTOR+${RELOCATING+${CTOR}${RW_REGION}}}
++ ${SMALL_DATA_DTOR+${RELOCATING+${DTOR}${RW_REGION}}}
++ ${DATA_PLT+${PLT_BEFORE_GOT+${PLT}${RW_REGION}}}
++ ${RELOCATING+${OTHER_GOT_SYMBOLS}}
++ ${NO_SMALL_DATA-${GOT}${RW_REGION}}
++ ${OTHER_GOT_SECTIONS}
++ ${SDATA}
++ ${OTHER_SDATA_SECTIONS}
++ ${RELOCATING+${BALIGN}}
++ ${RELOCATING+_edata = .;}
++ ${RELOCATING+PROVIDE (edata = .);}
++ ${RELOCATING+__bss_start = .;}
++ ${RELOCATING+${OTHER_BSS_SYMBOLS}}
++ ${SBSS}
++ ${BSS_PLT+${PLT}${RW_REGION}}
++ .bss ${RELOCATING-0} :
++ {
++ *(.dynbss)
++ *(.bss${RELOCATING+ .bss.* .gnu.linkonce.b.*})
++ *(COMMON)
++ /* Align here to ensure that the .bss section occupies space up to
++ _end. Align after .bss to ensure correct alignment even if the
++ .bss section disappears because there are no input sections. */
++ ${RELOCATING+. = ALIGN(${BSS_ALIGNMENT});}
++ }${RW_BSS_REGION}
++ ${OTHER_BSS_SECTIONS}
++ ${RELOCATING+. = ALIGN(${BSS_ALIGNMENT});}
++ ${RELOCATING+_end = .;}
++ ${RELOCATING+${OTHER_BSS_END_SYMBOLS}}
++ ${RELOCATING+PROVIDE (end = .);}
++ ${RELOCATING+${DATA_SEGMENT_END}}
++
++ /* Stabs debugging sections. */
++ .stab 0 : { *(.stab) }
++ .stabstr 0 : { *(.stabstr) }
++ .stab.excl 0 : { *(.stab.excl) }
++ .stab.exclstr 0 : { *(.stab.exclstr) }
++ .stab.index 0 : { *(.stab.index) }
++ .stab.indexstr 0 : { *(.stab.indexstr) }
++
++ .comment 0 : { *(.comment) }
++
++ /* DWARF debug sections.
++ Symbols in the DWARF debugging sections are relative to the beginning
++ of the section so we begin them at 0. */
++
++ /* DWARF 1 */
++ .debug 0 : { *(.debug) }
++ .line 0 : { *(.line) }
++
++ /* GNU DWARF 1 extensions */
++ .debug_srcinfo 0 : { *(.debug_srcinfo) }
++ .debug_sfnames 0 : { *(.debug_sfnames) }
++
++ /* DWARF 1.1 and DWARF 2 */
++ .debug_aranges 0 : { *(.debug_aranges) }
++ .debug_pubnames 0 : { *(.debug_pubnames) }
++
++ /* DWARF 2 */
++ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
++ .debug_abbrev 0 : { *(.debug_abbrev) }
++ .debug_line 0 : { *(.debug_line) }
++ .debug_frame 0 : { *(.debug_frame) }
++ .debug_str 0 : { *(.debug_str) }
++ .debug_loc 0 : { *(.debug_loc) }
++ .debug_macinfo 0 : { *(.debug_macinfo) }
++
++ /* SGI/MIPS DWARF 2 extensions */
++ .debug_weaknames 0 : { *(.debug_weaknames) }
++ .debug_funcnames 0 : { *(.debug_funcnames) }
++ .debug_typenames 0 : { *(.debug_typenames) }
++ .debug_varnames 0 : { *(.debug_varnames) }
++
++ ${STACK_ADDR+${STACK}}
++ ${OTHER_SECTIONS}
++ ${RELOCATING+${OTHER_END_SYMBOLS}}
++ ${RELOCATING+${STACKNOTE}}
++}
++EOF
+--- /dev/null
++++ b/ld/testsuite/ld-avr32/avr32.exp
+@@ -0,0 +1,25 @@
++# Expect script for AVR32 ELF linker tests.
++# Copyright 2004-2006 Atmel Corporation.
++#
++# This file is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++#
++# Written by Haavard Skinnemoen (hskinnemoen@atmel.com)
++#
++
++if ![istarget avr32-*-*] {
++ return
++}
++
++run_dump_test "pcrel"
+--- /dev/null
++++ b/ld/testsuite/ld-avr32/pcrel.d
+@@ -0,0 +1,74 @@
++#name: AVR32 ELF PC-relative external relocs
++#source: symbols.s
++#source: ../../../gas/testsuite/gas/avr32/pcrel.s
++#ld: -T $srcdir/$subdir/pcrel.ld
++#objdump: -d
++
++.*: file format elf.*avr32.*
++
++Disassembly of section .text:
++
++a0000000 <_start>:
++a0000000: d7 03 nop
++a0000002: d7 03 nop
++
++a0000004 <test_rjmp>:
++a0000004: d7 03 nop
++a0000006: c0 28 rjmp a000000a <test_rjmp\+0x6>
++a0000008: d7 03 nop
++a000000a: e0 8f 01 fb bral a0000400 <extsym10>
++
++a000000e <test_rcall>:
++a000000e: d7 03 nop
++a0000010 <test_rcall2>:
++a0000010: c0 2c rcall a0000014 <test_rcall2\+0x4>
++a0000012: d7 03 nop
++a0000014: ee b0 ff f6 rcall a0200000 <extsym21>
++
++a0000018 <test_branch>:
++a0000018: c0 31 brne a000001e <test_branch\+0x6>
++a000001a: fe 9f ff ff bral a0000018 <test_branch>
++a000001e: ee 90 ff f1 breq a0200000 <extsym21>
++
++a0000022 <test_lddpc>:
++a0000022: 48 30 lddpc r0,a000002c <sym1>
++a0000024: 48 20 lddpc r0,a000002c <sym1>
++a0000026: fe f0 7f da ld.w r0,pc\[32730\]
++ ...
++
++a000002c <sym1>:
++a000002c: d7 03 nop
++a000002e: d7 03 nop
++
++a0000030 <test_local>:
++a0000030: 48 20 lddpc r0,a0000038 <test_local\+0x8>
++a0000032: 48 30 lddpc r0,a000003c <test_local\+0xc>
++a0000034: 48 20 lddpc r0,a000003c <test_local\+0xc>
++a0000036: 00 00 add r0,r0
++a0000038: d7 03 nop
++a000003a: d7 03 nop
++a000003c: d7 03 nop
++a000003e: d7 03 nop
++
++Disassembly of section \.text\.init:
++a0000040 <test_inter_section>:
++a0000040: fe b0 ff e7 rcall a000000e <test_rcall>
++a0000044: d7 03 nop
++a0000046: fe b0 ff e4 rcall a000000e <test_rcall>
++a000004a: fe b0 ff e3 rcall a0000010 <test_rcall2>
++a000004e: d7 03 nop
++a0000050: fe b0 ff e0 rcall a0000010 <test_rcall2>
++
++Disassembly of section \.text\.pcrel10:
++
++a0000400 <extsym10>:
++a0000400: d7 03 nop
++
++Disassembly of section \.text\.pcrel16:
++
++a0008000 <extsym16>:
++a0008000: d7 03 nop
++
++Disassembly of section \.text\.pcrel21:
++a0200000 <extsym21>:
++a0200000: d7 03 nop
+--- /dev/null
++++ b/ld/testsuite/ld-avr32/pcrel.ld
+@@ -0,0 +1,23 @@
++ENTRY(_start)
++SECTIONS
++{
++ .text 0xa0000000:
++ {
++ *(.text)
++ }
++
++ .text.pcrel10 0xa0000400:
++ {
++ *(.text.pcrel10)
++ }
++
++ .text.pcrel16 0xa0008000:
++ {
++ *(.text.pcrel16)
++ }
++
++ .text.pcrel21 0xa0200000:
++ {
++ *(.text.pcrel21)
++ }
++}
+--- /dev/null
++++ b/ld/testsuite/ld-avr32/symbols.s
+@@ -0,0 +1,20 @@
++ .text
++ .global _start
++_start:
++ nop
++ nop
++
++ .section .text.pcrel10,"ax"
++ .global extsym10
++extsym10:
++ nop
++
++ .section .text.pcrel16,"ax"
++ .global extsym16
++extsym16:
++ nop
++
++ .section .text.pcrel21,"ax"
++ .global extsym21
++extsym21:
++ nop
+--- /dev/null
++++ b/opcodes/avr32-asm.c
+@@ -0,0 +1,244 @@
++/* Assembler interface for AVR32.
++ Copyright 2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of libopcodes.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#include <string.h>
++
++#include "avr32-opc.h"
++#include "avr32-asm.h"
++
++/* Structure for a register hash table entry. */
++struct reg_entry
++{
++ const char *name;
++ int number;
++};
++
++/* Integer Registers. */
++static const struct reg_entry reg_table[] =
++ {
++ /* Primary names (used by the disassembler) */
++ { "r0", 0 }, { "r1", 1 }, { "r2", 2 }, { "r3", 3 },
++ { "r4", 4 }, { "r5", 5 }, { "r6", 6 }, { "r7", 7 },
++ { "r8", 8 }, { "r9", 9 }, { "r10", 10 }, { "r11", 11 },
++ { "r12", 12 }, { "sp", 13 }, { "lr", 14 }, { "pc", 15 },
++ /* Alternatives to sp, lr and pc. */
++ { "r13", 13 }, { "r14", 14 }, { "r15", 15 },
++ };
++#define AVR32_NR_INTREGS (sizeof(reg_table)/sizeof(reg_table[0]))
++
++/* Coprocessor Registers. */
++static const struct reg_entry cr_table[] =
++ {
++ { "cr0", 0 }, { "cr1", 1 }, { "cr2", 2 }, { "cr3", 3 },
++ { "cr4", 4 }, { "cr5", 5 }, { "cr6", 6 }, { "cr7", 7 },
++ { "cr8", 8 }, { "cr9", 9 }, { "cr10", 10 }, { "cr11", 11 },
++ { "cr12", 12 }, { "cr13", 13 }, { "cr14", 14 }, { "cr15", 15 },
++ };
++#define AVR32_NR_CPREGS (sizeof(cr_table)/sizeof(cr_table[0]))
++
++#define AVR32_NR_FPREGS (sizeof(fr_table)/sizeof(fr_table[0]))
++
++/* PiCo Registers. */
++static const struct reg_entry pico_table[] =
++ {
++ { "inpix2", 0 }, { "inpix1", 1 }, { "inpix0", 2 },
++ { "outpix2", 3 }, { "outpix1", 4 }, { "outpix0", 5 },
++ { "coeff0_a", 6 }, { "coeff0_b", 7 }, { "coeff1_a", 8 },
++ { "coeff1_b", 9 }, { "coeff2_a", 10 }, { "coeff2_b", 11 },
++ { "vmu0_out", 12 }, { "vmu1_out", 13 }, { "vmu2_out", 14 },
++ { "config", 15 },
++ };
++#define AVR32_NR_PICOREGS (sizeof(pico_table)/sizeof(pico_table[0]))
++
++int
++avr32_parse_intreg(const char *str)
++{
++ unsigned int i;
++
++ for (i = 0; i < AVR32_NR_INTREGS; i++)
++ {
++ if (strcasecmp(reg_table[i].name, str) == 0)
++ return reg_table[i].number;
++ }
++
++ return -1;
++}
++
++int
++avr32_parse_cpreg(const char *str)
++{
++ unsigned int i;
++
++ for (i = 0; i < AVR32_NR_CPREGS; i++)
++ {
++ if (strcasecmp(cr_table[i].name, str) == 0)
++ return cr_table[i].number;
++ }
++
++ return -1;
++}
++
++
++int avr32_parse_picoreg(const char *str)
++{
++ unsigned int i;
++
++ for (i = 0; i < AVR32_NR_PICOREGS; i++)
++ {
++ if (strcasecmp(pico_table[i].name, str) == 0)
++ return pico_table[i].number;
++ }
++
++ return -1;
++}
++
++static unsigned long
++parse_reglist(char *str, char **endptr, int (*parse_reg)(const char *))
++{
++ int reg_from, reg_to;
++ unsigned long result = 0;
++ char *p1, *p2, c;
++
++ while (*str)
++ {
++ for (p1 = str; *p1; p1++)
++ if (*p1 == ',' || *p1 == '-')
++ break;
++
++ c = *p1, *p1 = 0;
++ reg_from = parse_reg(str);
++ *p1 = c;
++
++ if (reg_from < 0)
++ break;
++
++ if (*p1 == '-')
++ {
++ for (p2 = ++p1; *p2; p2++)
++ if (*p2 == ',')
++ break;
++
++ c = *p2, *p2 = 0;
++ /* printf("going to parse reg_to from `%s'\n", p1); */
++ reg_to = parse_reg(p1);
++ *p2 = c;
++
++ if (reg_to < 0)
++ break;
++
++ while (reg_from <= reg_to)
++ result |= (1 << reg_from++);
++ p1 = p2;
++ }
++ else
++ result |= (1 << reg_from);
++
++ str = p1;
++ if (*str) ++str;
++ }
++
++ if (endptr)
++ *endptr = str;
++
++ return result;
++}
++
++unsigned long
++avr32_parse_reglist(char *str, char **endptr)
++{
++ return parse_reglist(str, endptr, avr32_parse_intreg);
++}
++
++unsigned long
++avr32_parse_cpreglist(char *str, char **endptr)
++{
++ return parse_reglist(str, endptr, avr32_parse_cpreg);
++}
++
++unsigned long
++avr32_parse_pico_reglist(char *str, char **endptr)
++{
++ return parse_reglist(str, endptr, avr32_parse_picoreg);
++}
++
++int
++avr32_make_regmask8(unsigned long regmask16, unsigned long *regmask8)
++{
++ unsigned long result = 0;
++
++ /* printf("convert regmask16 0x%04lx\n", regmask16); */
++
++ if (regmask16 & 0xf)
++ {
++ if ((regmask16 & 0xf) == 0xf)
++ result |= 1 << 0;
++ else
++ return -1;
++ }
++ if (regmask16 & 0xf0)
++ {
++ if ((regmask16 & 0xf0) == 0xf0)
++ result |= 1 << 1;
++ else
++ return -1;
++ }
++ if (regmask16 & 0x300)
++ {
++ if ((regmask16 & 0x300) == 0x300)
++ result |= 1 << 2;
++ else
++ return -1;
++ }
++ if (regmask16 & (1 << 13))
++ return -1;
++
++ if (regmask16 & (1 << 10))
++ result |= 1 << 3;
++ if (regmask16 & (1 << 11))
++ result |= 1 << 4;
++ if (regmask16 & (1 << 12))
++ result |= 1 << 5;
++ if (regmask16 & (1 << 14))
++ result |= 1 << 6;
++ if (regmask16 & (1 << 15))
++ result |= 1 << 7;
++
++ *regmask8 = result;
++
++ return 0;
++}
++
++#if 0
++struct reg_map
++{
++ const struct reg_entry *names;
++ int nr_regs;
++ struct hash_control *htab;
++ const char *errmsg;
++};
++
++struct reg_map all_reg_maps[] =
++ {
++ { reg_table, AVR32_NR_INTREGS, NULL, N_("integral register expected") },
++ { cr_table, AVR32_NR_CPREGS, NULL, N_("coprocessor register expected") },
++ };
++#endif
+--- /dev/null
++++ b/opcodes/avr32-asm.h
+@@ -0,0 +1,40 @@
++/* Assembler interface for AVR32.
++ Copyright 2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of libopcodes.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++#ifndef __OPCODES_AVR32_ASM_H
++#define __OPCODES_AVR32_ASM_H
++
++extern int
++avr32_parse_intreg(const char *str);
++extern int
++avr32_parse_cpreg(const char *str);
++extern int
++avr32_parse_picoreg(const char *str);
++extern unsigned long
++avr32_parse_reglist(char *str, char **endptr);
++extern unsigned long
++avr32_parse_cpreglist(char *str, char **endptr);
++extern unsigned long
++avr32_parse_pico_reglist(char *str, char **endptr);
++extern int
++avr32_make_regmask8(unsigned long regmask16, unsigned long *regmask8);
++
++#endif /* __OPCODES_AVR32_ASM_H */
+--- /dev/null
++++ b/opcodes/avr32-dis.c
+@@ -0,0 +1,916 @@
++/* Print AVR32 instructions for GDB and objdump.
++ Copyright 2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of libopcodes.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#include "sysdep.h"
++#include "dis-asm.h"
++#include "avr32-opc.h"
++#include "opintl.h"
++#include "safe-ctype.h"
++
++/* TODO: Share this with -asm */
++
++/* Structure for a register hash table entry. */
++struct reg_entry
++{
++ const char *name;
++ int number;
++};
++
++#ifndef strneq
++#define strneq(a,b,n) (strncmp ((a), (b), (n)) == 0)
++#endif
++
++static char avr32_opt_decode_fpu = 0;
++
++static const struct reg_entry reg_table[] =
++ {
++ /* Primary names (used by the disassembler) */
++ { "r0", 0 }, { "r1", 1 }, { "r2", 2 }, { "r3", 3 },
++ { "r4", 4 }, { "r5", 5 }, { "r6", 6 }, { "r7", 7 },
++ { "r8", 8 }, { "r9", 9 }, { "r10", 10 }, { "r11", 11 },
++ { "r12", 12 }, { "sp", 13 }, { "lr", 14 }, { "pc", 15 },
++ /* Alternatives to sp, lr and pc. */
++ { "r13", 13 }, { "r14", 14 }, { "r15", 15 },
++ };
++#define AVR32_NR_INTREGS (sizeof(reg_table)/sizeof(reg_table[0]))
++
++/* Coprocessor Registers. */
++static const struct reg_entry cr_table[] =
++ {
++ { "cr0", 0 }, { "cr1", 1 }, { "cr2", 2 }, { "cr3", 3 },
++ { "cr4", 4 }, { "cr5", 5 }, { "cr6", 6 }, { "cr7", 7 },
++ { "cr8", 8 }, { "cr9", 9 }, { "cr10", 10 }, { "cr11", 11 },
++ { "cr12", 12 }, { "cr13", 13 }, { "cr14", 14 }, { "cr15", 15 },
++ };
++#define AVR32_NR_CPREGS (sizeof(cr_table)/sizeof(cr_table[0]))
++
++static const char bparts[4] = { 'b', 'l', 'u', 't' };
++static bfd_vma current_pc;
++
++struct avr32_field_value
++{
++ const struct avr32_ifield *ifield;
++ unsigned long value;
++};
++
++struct avr32_operand
++{
++ int id;
++ int is_pcrel;
++ int align_order;
++ int (*print)(struct avr32_operand *op, struct disassemble_info *info,
++ struct avr32_field_value *ifields);
++};
++
++static signed long
++get_signed_value(const struct avr32_field_value *fv)
++{
++ signed long value = fv->value;
++
++ if (fv->value & (1 << (fv->ifield->bitsize - 1)))
++ value |= (~0UL << fv->ifield->bitsize);
++
++ return value;
++}
++
++static void
++print_reglist_range(unsigned int first, unsigned int last,
++ const struct reg_entry *reg_names,
++ int need_comma,
++ struct disassemble_info *info)
++{
++ if (need_comma)
++ info->fprintf_func(info->stream, ",");
++
++ if (first == last)
++ info->fprintf_func(info->stream, "%s",
++ reg_names[first].name);
++ else
++ info->fprintf_func(info->stream, "%s-%s",
++ reg_names[first].name, reg_names[last].name);
++}
++
++static int
++print_intreg(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regid = ifields[0].value << op->align_order;
++
++ info->fprintf_func(info->stream, "%s",
++ reg_table[regid].name);
++ return 1;
++}
++
++static int
++print_intreg_predec(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "--%s",
++ reg_table[ifields[0].value].name);
++ return 1;
++}
++
++static int
++print_intreg_postinc(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "%s++",
++ reg_table[ifields[0].value].name);
++ return 1;
++}
++
++static int
++print_intreg_lsl(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ const char *rp = reg_table[ifields[0].value].name;
++ unsigned long sa = ifields[1].value;
++
++ if (sa)
++ info->fprintf_func(info->stream, "%s<<0x%lx", rp, sa);
++ else
++ info->fprintf_func(info->stream, "%s", rp);
++
++ return 2;
++}
++
++static int
++print_intreg_lsr(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ const char *rp = reg_table[ifields[0].value].name;
++ unsigned long sa = ifields[1].value;
++
++ if (sa)
++ info->fprintf_func(info->stream, "%s>>0x%lx", rp, sa);
++ else
++ info->fprintf_func(info->stream, "%s", rp);
++
++ return 2;
++}
++
++static int
++print_intreg_bpart(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "%s:%c",
++ reg_table[ifields[0].value].name,
++ bparts[ifields[1].value]);
++ return 2;
++}
++
++static int
++print_intreg_hpart(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "%s:%c",
++ reg_table[ifields[0].value].name,
++ ifields[1].value ? 't' : 'b');
++ return 2;
++}
++
++static int
++print_intreg_sdisp(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ signed long disp;
++
++ disp = get_signed_value(&ifields[1]) << op->align_order;
++
++ info->fprintf_func(info->stream, "%s[%ld]",
++ reg_table[ifields[0].value].name, disp);
++ return 2;
++}
++
++static int
++print_intreg_udisp(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "%s[0x%lx]",
++ reg_table[ifields[0].value].name,
++ ifields[1].value << op->align_order);
++ return 2;
++}
++
++static int
++print_intreg_index(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ const char *rb, *ri;
++ unsigned long sa = ifields[2].value;
++
++ rb = reg_table[ifields[0].value].name;
++ ri = reg_table[ifields[1].value].name;
++
++ if (sa)
++ info->fprintf_func(info->stream, "%s[%s<<0x%lx]", rb, ri, sa);
++ else
++ info->fprintf_func(info->stream, "%s[%s]", rb, ri);
++
++ return 3;
++}
++
++static int
++print_intreg_xindex(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "%s[%s:%c<<2]",
++ reg_table[ifields[0].value].name,
++ reg_table[ifields[1].value].name,
++ bparts[ifields[2].value]);
++ return 3;
++}
++
++static int
++print_jmplabel(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ bfd_vma address, offset;
++
++ offset = get_signed_value(ifields) << op->align_order;
++ address = (current_pc & (~0UL << op->align_order)) + offset;
++
++ info->print_address_func(address, info);
++
++ return 1;
++}
++
++static int
++print_pc_disp(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ bfd_vma address, offset;
++
++ offset = ifields[0].value << op->align_order;
++ address = (current_pc & (~0UL << op->align_order)) + offset;
++
++ info->print_address_func(address, info);
++
++ return 1;
++}
++
++static int
++print_sp(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields ATTRIBUTE_UNUSED)
++{
++ info->fprintf_func(info->stream, "sp");
++ return 1;
++}
++
++static int
++print_sp_disp(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "sp[0x%lx]",
++ ifields[0].value << op->align_order);
++ return 1;
++}
++
++static int
++print_cpno(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "cp%lu", ifields[0].value);
++ return 1;
++}
++
++static int
++print_cpreg(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "cr%lu",
++ ifields[0].value << op->align_order);
++ return 1;
++}
++
++static int
++print_uconst(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "0x%lx",
++ ifields[0].value << op->align_order);
++ return 1;
++}
++
++static int
++print_sconst(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ info->fprintf_func(info->stream, "%ld",
++ get_signed_value(ifields) << op->align_order);
++ return 1;
++}
++
++static int
++print_reglist8_head(unsigned long regmask, int *commap,
++ struct disassemble_info *info)
++{
++ int first = -1, last, i = 0;
++ int need_comma = 0;
++
++ while (i < 12)
++ {
++ if (first == -1 && (regmask & 1))
++ {
++ first = i;
++ }
++ else if (first != -1 && !(regmask & 1))
++ {
++ last = i - 1;
++
++ print_reglist_range(first, last, reg_table, need_comma, info);
++ need_comma = 1;
++ first = -1;
++ }
++
++ if (i < 8)
++ i += 4;
++ else if (i < 10)
++ i += 2;
++ else
++ i++;
++ regmask >>= 1;
++ }
++
++ *commap = need_comma;
++ return first;
++}
++
++static void
++print_reglist8_tail(unsigned long regmask, int first, int need_comma,
++ struct disassemble_info *info)
++{
++ int last = 11;
++
++ if (regmask & 0x20)
++ {
++ if (first == -1)
++ first = 12;
++ last = 12;
++ }
++
++ if (first != -1)
++ {
++ print_reglist_range(first, last, reg_table, need_comma, info);
++ need_comma = 1;
++ first = -1;
++ }
++
++ if (regmask & 0x40)
++ {
++ if (first == -1)
++ first = 14;
++ last = 14;
++ }
++
++ if (regmask & 0x80)
++ {
++ if (first == -1)
++ first = 15;
++ last = 15;
++ }
++
++ if (first != -1)
++ print_reglist_range(first, last, reg_table, need_comma, info);
++}
++
++static int
++print_reglist8(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regmask = ifields[0].value;
++ int first, need_comma;
++
++ first = print_reglist8_head(regmask, &need_comma, info);
++ print_reglist8_tail(regmask, first, need_comma, info);
++
++ return 1;
++}
++
++static int
++print_reglist9(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regmask = ifields[0].value >> 1;
++ int first, last, need_comma;
++
++ first = print_reglist8_head(regmask, &need_comma, info);
++
++ if ((ifields[0].value & 0x101) == 0x101)
++ {
++ if (first != -1)
++ {
++ last = 11;
++
++ print_reglist_range(first, last, reg_table, need_comma, info);
++ need_comma = 1;
++ first = -1;
++ }
++
++ print_reglist_range(15, 15, reg_table, need_comma, info);
++
++ regmask >>= 5;
++
++ if ((regmask & 3) == 0)
++ info->fprintf_func(info->stream, ",r12=0");
++ else if ((regmask & 3) == 1)
++ info->fprintf_func(info->stream, ",r12=1");
++ else
++ info->fprintf_func(info->stream, ",r12=-1");
++ }
++ else
++ print_reglist8_tail(regmask, first, need_comma, info);
++
++ return 1;
++}
++
++static int
++print_reglist16(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regmask = ifields[0].value;
++ unsigned int i = 0, first, last;
++ int need_comma = 0;
++
++ while (i < 16)
++ {
++ if (regmask & 1)
++ {
++ first = i;
++ while (i < 16)
++ {
++ i++;
++ regmask >>= 1;
++ if (!(regmask & 1))
++ break;
++ }
++ last = i - 1;
++ print_reglist_range(first, last, reg_table, need_comma, info);
++ need_comma = 1;
++ }
++ else
++ {
++ i++;
++ regmask >>= 1;
++ }
++ }
++
++ return 1;
++}
++
++static int
++print_reglist_ldm(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ int rp, w_bit;
++ int i, first, last;
++ unsigned long regmask;
++
++ rp = ifields[0].value;
++ w_bit = ifields[1].value;
++ regmask = ifields[2].value;
++
++ if (regmask & (1 << AVR32_REG_PC) && rp == AVR32_REG_PC)
++ {
++ if (w_bit)
++ info->fprintf_func(info->stream, "sp++");
++ else
++ info->fprintf_func(info->stream, "sp");
++
++ for (i = 0; i < 12; )
++ {
++ if (regmask & (1 << i))
++ {
++ first = i;
++ while (i < 12)
++ {
++ i++;
++ if (!(regmask & (1 << i)))
++ break;
++ }
++ last = i - 1;
++ print_reglist_range(first, last, reg_table, 1, info);
++ }
++ else
++ i++;
++ }
++
++ info->fprintf_func(info->stream, ",pc");
++ if (regmask & (1 << AVR32_REG_LR))
++ info->fprintf_func(info->stream, ",r12=-1");
++ else if (regmask & (1 << AVR32_REG_R12))
++ info->fprintf_func(info->stream, ",r12=1");
++ else
++ info->fprintf_func(info->stream, ",r12=0");
++ }
++ else
++ {
++ if (w_bit)
++ info->fprintf_func(info->stream, "%s++,", reg_table[rp].name);
++ else
++ info->fprintf_func(info->stream, "%s,", reg_table[rp].name);
++
++ print_reglist16(op, info, ifields + 2);
++ }
++
++ return 3;
++}
++
++static int
++print_reglist_cp8(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regmask = ifields[0].value;
++ unsigned int i = 0, first, last, offset = 0;
++ int need_comma = 0;
++
++ if (ifields[1].value)
++ offset = 8;
++
++ while (i < 8)
++ {
++ if (regmask & 1)
++ {
++ first = i;
++ while (i < 8)
++ {
++ i++;
++ regmask >>= 1;
++ if (!(regmask & 1))
++ break;
++ }
++ last = i - 1;
++ print_reglist_range(offset + first, offset + last,
++ cr_table, need_comma, info);
++ need_comma = 1;
++ }
++ else
++ {
++ i++;
++ regmask >>= 1;
++ }
++ }
++
++ return 2;
++}
++
++static int
++print_reglist_cpd8(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regmask = ifields[0].value;
++ unsigned int i = 0, first, last;
++ int need_comma = 0;
++
++ while (i < 8)
++ {
++ if (regmask & 1)
++ {
++ first = 2 * i;
++ while (i < 8)
++ {
++ i++;
++ regmask >>= 1;
++ if (!(regmask & 1))
++ break;
++ }
++ last = 2 * (i - 1) + 1;
++ print_reglist_range(first, last, cr_table, need_comma, info);
++ need_comma = 1;
++ }
++ else
++ {
++ i++;
++ regmask >>= 1;
++ }
++ }
++
++ return 1;
++}
++
++static int
++print_retval(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regid = ifields[0].value;
++ const char *retval;
++
++ if (regid < AVR32_REG_SP)
++ retval = reg_table[regid].name;
++ else if (regid == AVR32_REG_SP)
++ retval = "0";
++ else if (regid == AVR32_REG_LR)
++ retval = "-1";
++ else
++ retval = "1";
++
++ info->fprintf_func(info->stream, "%s", retval);
++
++ return 1;
++}
++
++static int
++print_mcall(struct avr32_operand *op,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ unsigned long regid = ifields[0].value;
++
++ if (regid == AVR32_REG_PC)
++ print_jmplabel(op, info, ifields + 1);
++ else
++ print_intreg_sdisp(op, info, ifields);
++
++ return 2;
++}
++
++static int
++print_jospinc(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields)
++{
++ signed long value = ifields[0].value;
++
++ if (value >= 4)
++ value -= 8;
++ else
++ value += 1;
++
++ info->fprintf_func(info->stream, "%ld", value);
++
++ return 1;
++}
++
++static int
++print_coh(struct avr32_operand *op ATTRIBUTE_UNUSED,
++ struct disassemble_info *info,
++ struct avr32_field_value *ifields ATTRIBUTE_UNUSED)
++{
++ info->fprintf_func(info->stream, "COH");
++ return 0;
++}
++
++#define OP(name, sgn, pcrel, align, func) \
++ { AVR32_OPERAND_##name, pcrel, align, print_##func }
++
++struct avr32_operand operand[AVR32_NR_OPERANDS] =
++ {
++ OP(INTREG, 0, 0, 0, intreg),
++ OP(INTREG_PREDEC, 0, 0, 0, intreg_predec),
++ OP(INTREG_POSTINC, 0, 0, 0, intreg_postinc),
++ OP(INTREG_LSL, 0, 0, 0, intreg_lsl),
++ OP(INTREG_LSR, 0, 0, 0, intreg_lsr),
++ OP(INTREG_BSEL, 0, 0, 0, intreg_bpart),
++ OP(INTREG_HSEL, 0, 0, 1, intreg_hpart),
++ OP(INTREG_SDISP, 1, 0, 0, intreg_sdisp),
++ OP(INTREG_SDISP_H, 1, 0, 1, intreg_sdisp),
++ OP(INTREG_SDISP_W, 1, 0, 2, intreg_sdisp),
++ OP(INTREG_UDISP, 0, 0, 0, intreg_udisp),
++ OP(INTREG_UDISP_H, 0, 0, 1, intreg_udisp),
++ OP(INTREG_UDISP_W, 0, 0, 2, intreg_udisp),
++ OP(INTREG_INDEX, 0, 0, 0, intreg_index),
++ OP(INTREG_XINDEX, 0, 0, 0, intreg_xindex),
++ OP(DWREG, 0, 0, 1, intreg),
++ OP(PC_UDISP_W, 0, 1, 2, pc_disp),
++ OP(SP, 0, 0, 0, sp),
++ OP(SP_UDISP_W, 0, 0, 2, sp_disp),
++ OP(CPNO, 0, 0, 0, cpno),
++ OP(CPREG, 0, 0, 0, cpreg),
++ OP(CPREG_D, 0, 0, 1, cpreg),
++ OP(UNSIGNED_CONST, 0, 0, 0, uconst),
++ OP(UNSIGNED_CONST_W, 0, 0, 2, uconst),
++ OP(SIGNED_CONST, 1, 0, 0, sconst),
++ OP(SIGNED_CONST_W, 1, 0, 2, sconst),
++ OP(JMPLABEL, 1, 1, 1, jmplabel),
++ OP(UNSIGNED_NUMBER, 0, 0, 0, uconst),
++ OP(UNSIGNED_NUMBER_W, 0, 0, 2, uconst),
++ OP(REGLIST8, 0, 0, 0, reglist8),
++ OP(REGLIST9, 0, 0, 0, reglist9),
++ OP(REGLIST16, 0, 0, 0, reglist16),
++ OP(REGLIST_LDM, 0, 0, 0, reglist_ldm),
++ OP(REGLIST_CP8, 0, 0, 0, reglist_cp8),
++ OP(REGLIST_CPD8, 0, 0, 0, reglist_cpd8),
++ OP(RETVAL, 0, 0, 0, retval),
++ OP(MCALL, 1, 0, 2, mcall),
++ OP(JOSPINC, 0, 0, 0, jospinc),
++ OP(COH, 0, 0, 0, coh),
++ };
++
++static void
++print_opcode(bfd_vma insn_word, const struct avr32_opcode *opc,
++ bfd_vma pc, struct disassemble_info *info)
++{
++ const struct avr32_syntax *syntax = opc->syntax;
++ struct avr32_field_value fields[AVR32_MAX_FIELDS];
++ unsigned int i, next_field = 0, nr_operands;
++
++ for (i = 0; i < opc->nr_fields; i++)
++ {
++ opc->fields[i]->extract(opc->fields[i], &insn_word, &fields[i].value);
++ fields[i].ifield = opc->fields[i];
++ }
++
++ current_pc = pc;
++ info->fprintf_func(info->stream, "%s", syntax->mnemonic->name);
++
++ if (syntax->nr_operands < 0)
++ nr_operands = (unsigned int) -syntax->nr_operands;
++ else
++ nr_operands = (unsigned int) syntax->nr_operands;
++
++ for (i = 0; i < nr_operands; i++)
++ {
++ struct avr32_operand *op = &operand[syntax->operand[i]];
++
++ if (i)
++ info->fprintf_func(info->stream, ",");
++ else
++ info->fprintf_func(info->stream, "\t");
++ next_field += op->print(op, info, &fields[next_field]);
++ }
++}
++
++#define is_fpu_insn(iw) ((iw&0xf9f0e000)==0xe1a00000)
++
++static const struct avr32_opcode *
++find_opcode(bfd_vma insn_word)
++{
++ int i;
++
++ for (i = 0; i < AVR32_NR_OPCODES; i++)
++ {
++ const struct avr32_opcode *opc = &avr32_opc_table[i];
++
++ if ((insn_word & opc->mask) == opc->value)
++ {
++ if (avr32_opt_decode_fpu)
++ {
++ if (is_fpu_insn(insn_word))
++ {
++ if (opc->id != AVR32_OPC_COP)
++ return opc;
++ }
++ else
++ return opc;
++ }
++ else
++ return opc;
++ }
++ }
++
++ return NULL;
++}
++
++static int
++read_insn_word(bfd_vma pc, bfd_vma *valuep,
++ struct disassemble_info *info)
++{
++ bfd_byte b[4];
++ int status;
++
++ status = info->read_memory_func(pc, b, 4, info);
++ if (status)
++ {
++ status = info->read_memory_func(pc, b, 2, info);
++ if (status)
++ {
++ info->memory_error_func(status, pc, info);
++ return -1;
++ }
++ b[3] = b[2] = 0;
++ }
++
++ *valuep = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
++ return 0;
++}
++
++/* Parse an individual disassembler option. */
++
++void
++parse_avr32_disassembler_option (option)
++ char * option;
++{
++ if (option == NULL)
++ return;
++
++ if (!strcmp(option,"decode-fpu"))
++ {
++ avr32_opt_decode_fpu = 1;
++ return;
++ }
++
++ printf("\n%s--",option);
++ /* XXX - should break 'option' at following delimiter. */
++ fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
++
++ return;
++}
++
++/* Parse the string of disassembler options, splitting it at whitespaces
++ or commas. (Whitespace separators supported for backwards compatibility). */
++
++static void
++parse_disassembler_options (char *options)
++{
++ if (options == NULL)
++ return;
++
++ while (*options)
++ {
++ parse_avr32_disassembler_option (options);
++
++      /* Skip forward to next separator. */
++ while ((*options) && (! ISSPACE (*options)) && (*options != ','))
++ ++ options;
++      /* Skip forward past separators. */
++ while (ISSPACE (*options) || (*options == ','))
++ ++ options;
++ }
++}
++
++int
++print_insn_avr32(bfd_vma pc, struct disassemble_info *info)
++{
++ bfd_vma insn_word;
++ const struct avr32_opcode *opc;
++
++ if (info->disassembler_options)
++ {
++ parse_disassembler_options (info->disassembler_options);
++
++ /* To avoid repeated parsing of these options, we remove them here. */
++ info->disassembler_options = NULL;
++ }
++
++ info->bytes_per_chunk = 1;
++ info->display_endian = BFD_ENDIAN_BIG;
++
++ if (read_insn_word(pc, &insn_word, info))
++ return -1;
++
++ opc = find_opcode(insn_word);
++ if (opc)
++ {
++ print_opcode(insn_word, opc, pc, info);
++ return opc->size;
++ }
++ else
++ {
++ info->fprintf_func(info->stream, _("*unknown*"));
++ return 2;
++ }
++
++}
++
++void
++print_avr32_disassembler_options (FILE *stream ATTRIBUTE_UNUSED)
++{
++ fprintf(stream, "\n AVR32 Specific Disassembler Options:\n");
++ fprintf(stream, " -M decode-fpu Print FPU instructions instead of 'cop' \n");
++}
++
+--- /dev/null
++++ b/opcodes/avr32-opc.c
+@@ -0,0 +1,6906 @@
++/* Opcode tables for AVR32.
++ Copyright 2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of libopcodes.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#include <stdlib.h>
++#include <assert.h>
++
++#include "avr32-opc.h"
++
++#define PICO_CPNO 1
++
++void
++avr32_insert_simple(const struct avr32_ifield *field,
++ void *buf, unsigned long value)
++{
++ bfd_vma word;
++
++ word = bfd_getb32(buf);
++ word &= ~field->mask;
++ word |= (value << field->shift) & field->mask;
++ bfd_putb32(word, buf);
++}
++
++void
++avr32_insert_bit5c(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long value)
++{
++ char *opcode = buf;
++
++ opcode[0] = (opcode[0] & 0xe1) | (value & 0x1e);
++ opcode[1] = (opcode[1] & 0xef) | ((value & 1) << 4);
++}
++
++void
++avr32_insert_k10(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long value)
++{
++ char *opcode = buf;
++
++ opcode[0] = (opcode[0] & 0xf0) | ((value & 0xf0) >> 4);
++ opcode[1] = ((opcode[1] & 0x0c) | ((value & 0x0f) << 4)
++ | ((value & 0x300) >> 8));
++}
++
++
++void
++avr32_insert_k21(const struct avr32_ifield *field,
++ void *buf, unsigned long value)
++{
++ bfd_vma word;
++ bfd_vma k21;
++
++ word = bfd_getb32(buf);
++ word &= ~field->mask;
++ k21 = ((value & 0xffff) | ((value & 0x10000) << 4)
++ | ((value & 0x1e0000) << 8));
++ assert(!(k21 & ~field->mask));
++ word |= k21;
++ bfd_putb32(word, buf);
++}
++
++void
++avr32_insert_cpop(const struct avr32_ifield *field,
++ void *buf, unsigned long value)
++{
++ bfd_vma word;
++
++ word = bfd_getb32(buf);
++ word &= ~field->mask;
++ word |= (((value & 0x1e) << 15) | ((value & 0x60) << 20)
++ | ((value & 0x01) << 12));
++ bfd_putb32(word, buf);
++}
++
++void
++avr32_insert_k12cp(const struct avr32_ifield *field,
++ void *buf, unsigned long value)
++{
++ bfd_vma word;
++
++ word = bfd_getb32(buf);
++ word &= ~field->mask;
++ word |= ((value & 0xf00) << 4) | (value & 0xff);
++ bfd_putb32(word, buf);
++}
++
++void avr32_extract_simple(const struct avr32_ifield *field,
++ void *buf, unsigned long *value)
++{
++ /* XXX: The disassembler has done any necessary byteswapping already */
++ bfd_vma word = *(bfd_vma *)buf;
++
++ *value = (word & field->mask) >> field->shift;
++}
++
++void avr32_extract_bit5c(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long *value)
++{
++ bfd_vma word = *(bfd_vma *)buf;
++
++ *value = ((word >> 20) & 1) | ((word >> 24) & 0x1e);
++}
++
++void avr32_extract_k10(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long *value)
++{
++ bfd_vma word = *(bfd_vma *)buf;
++
++ *value = ((word >> 8) & 0x300) | ((word >> 20) & 0xff);
++}
++
++void avr32_extract_k21(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long *value)
++{
++ bfd_vma word = *(bfd_vma *)buf;
++
++ *value = ((word & 0xffff) | ((word >> 4) & 0x10000)
++ | ((word >> 8) & 0x1e0000));
++}
++
++void avr32_extract_cpop(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long *value)
++{
++ bfd_vma word = *(bfd_vma *)buf;
++
++ *value = (((word >> 12) & 1) | ((word >> 15) & 0x1e)
++ | ((word >> 20) & 0x60));
++}
++
++void avr32_extract_k12cp(const struct avr32_ifield *field ATTRIBUTE_UNUSED,
++ void *buf, unsigned long *value)
++{
++ bfd_vma word = *(bfd_vma *)buf;
++
++ *value = ((word >> 4) & 0xf00) | (word & 0xff);
++}
++
++
++#define IFLD(id, bitsz, shift, mask, func) \
++ { AVR32_IFIELD_##id, bitsz, shift, mask, \
++ avr32_insert_##func, avr32_extract_##func }
++
++const struct avr32_ifield avr32_ifield_table[] =
++ {
++ IFLD(RX, 4, 25, 0x1e000000, simple),
++ IFLD(RY, 4, 16, 0x000f0000, simple),
++ IFLD(COND4C, 4, 20, 0x00f00000, simple),
++ IFLD(K8C, 8, 20, 0x0ff00000, simple),
++ IFLD(K7C, 7, 20, 0x07f00000, simple),
++ IFLD(K5C, 5, 20, 0x01f00000, simple),
++ IFLD(K3, 3, 20, 0x00700000, simple),
++ IFLD(RY_DW, 3, 17, 0x000e0000, simple),
++ IFLD(COND4E, 4, 8, 0x00000f00, simple),
++ IFLD(K8E, 8, 0, 0x000000ff, simple),
++ IFLD(BIT5C, 5, 20, 0x1e100000, bit5c),
++ IFLD(COND3, 3, 16, 0x00070000, simple),
++ IFLD(K10, 10, 16, 0x0ff30000, k10),
++ IFLD(POPM, 9, 19, 0x0ff80000, simple),
++ IFLD(K2, 2, 4, 0x00000030, simple),
++ IFLD(RD_E, 4, 0, 0x0000000f, simple),
++ IFLD(RD_DW, 3, 1, 0x0000000e, simple),
++ IFLD(X, 1, 5, 0x00000020, simple),
++ IFLD(Y, 1, 4, 0x00000010, simple),
++ IFLD(X2, 1, 13, 0x00002000, simple),
++ IFLD(Y2, 1, 12, 0x00001000, simple),
++ IFLD(K5E, 5, 0, 0x0000001f, simple),
++ IFLD(PART2, 2, 0, 0x00000003, simple),
++ IFLD(PART1, 1, 0, 0x00000001, simple),
++ IFLD(K16, 16, 0, 0x0000ffff, simple),
++ IFLD(CACHEOP, 5, 11, 0x0000f800, simple),
++ IFLD(K11, 11, 0, 0x000007ff, simple),
++ IFLD(K21, 21, 0, 0x1e10ffff, k21),
++ IFLD(CPOP, 7, 12, 0x060f1000, cpop),
++ IFLD(CPNO, 3, 13, 0x0000e000, simple),
++ IFLD(CRD_RI, 4, 8, 0x00000f00, simple),
++ IFLD(CRX, 4, 4, 0x000000f0, simple),
++ IFLD(CRY, 4, 0, 0x0000000f, simple),
++ IFLD(K7E, 7, 0, 0x0000007f, simple),
++ IFLD(CRD_DW, 3, 9, 0x00000e00, simple),
++ IFLD(PART1_K12, 1, 12, 0x00001000, simple),
++ IFLD(PART2_K12, 2, 12, 0x00003000, simple),
++ IFLD(K12, 12, 0, 0x00000fff, simple),
++ IFLD(S5, 5, 5, 0x000003e0, simple),
++ IFLD(K5E2, 5, 4, 0x000001f0, simple),
++ IFLD(K4, 4, 20, 0x00f00000, simple),
++ IFLD(COND4E2, 4, 4, 0x000000f0, simple),
++ IFLD(K8E2, 8, 4, 0x00000ff0, simple),
++ IFLD(K6, 6, 20, 0x03f00000, simple),
++ IFLD(MEM15, 15, 0, 0x00007fff, simple),
++ IFLD(MEMB5, 5, 15, 0x000f8000, simple),
++ IFLD(W, 1, 25, 0x02000000, simple),
++ /* Coprocessor Multiple High/Low */
++ IFLD(CM_HL, 1, 8, 0x00000100, simple),
++ IFLD(K12CP, 12 ,0, 0x0000f0ff, k12cp),
++ IFLD(K9E, 9 ,0, 0x000001ff, simple),
++ IFLD (FP_RX, 4, 4, 0x000000F0, simple),
++ IFLD (FP_RY, 4, 0, 0x0000000F, simple),
++ IFLD (FP_RD, 4, 8, 0x00000F00, simple),
++ IFLD (FP_RA, 4, 16, 0x000F0000, simple)
++ };
++#undef IFLD
++
++
++struct avr32_opcode avr32_opc_table[] =
++ {
++ {
++ AVR32_OPC_ABS, 2, 0x5c400000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ABS],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_ACALL, 2, 0xd0000000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_ACALL],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_ACR, 2, 0x5c000000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ACR],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ADC, 4, 0xe0000040, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_ADC],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ADD1, 2, 0x00000000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ADD1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_ADD2, 4, 0xe0000000, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_ADD2],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_ADDABS, 4, 0xe0000e40, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_ADDABS],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ADDHH_W, 4, 0xe0000e00, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_ADDHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_AND1, 2, 0x00600000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_AND1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_AND2, 4, 0xe1e00000, 0xe1f0fe00,
++ &avr32_syntax_table[AVR32_SYNTAX_AND2],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E2],
++ },
++ },
++ {
++ AVR32_OPC_AND3, 4, 0xe1e00200, 0xe1f0fe00,
++ &avr32_syntax_table[AVR32_SYNTAX_AND3],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E2],
++ },
++ },
++ {
++ AVR32_OPC_ANDH, 4, 0xe4100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ANDH],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_ANDH_COH, 4, 0xe6100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ANDH_COH],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_ANDL, 4, 0xe0100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ANDL],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_ANDL_COH, 4, 0xe2100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ANDL_COH],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_ANDN, 2, 0x00800000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ANDN],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_ASR1, 4, 0xe0000840, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_ASR1],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ASR3, 4, 0xe0001400, 0xe1f0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_ASR3],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_ASR2, 2, 0xa1400000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ASR2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_BIT5C],
++ },
++ },
++ {
++ AVR32_OPC_BLD, 4, 0xedb00000, 0xfff0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_BLD],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_BREQ1, 2, 0xc0000000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BREQ1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRNE1, 2, 0xc0010000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRNE1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRCC1, 2, 0xc0020000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRCC1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRCS1, 2, 0xc0030000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRCS1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRGE1, 2, 0xc0040000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRGE1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRLT1, 2, 0xc0050000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRLT1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRMI1, 2, 0xc0060000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRMI1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BRPL1, 2, 0xc0070000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRPL1],
++ BFD_RELOC_AVR32_9H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_BREQ2, 4, 0xe0800000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BREQ2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRNE2, 4, 0xe0810000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRNE2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRCC2, 4, 0xe0820000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRHS2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRCS2, 4, 0xe0830000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRLO2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRGE2, 4, 0xe0840000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRGE2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRLT2, 4, 0xe0850000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRLT2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRMI2, 4, 0xe0860000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRMI2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRPL2, 4, 0xe0870000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRPL2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRLS, 4, 0xe0880000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRLS],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRGT, 4, 0xe0890000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRGT],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRLE, 4, 0xe08a0000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRLE],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRHI, 4, 0xe08b0000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRHI],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRVS, 4, 0xe08c0000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRVS],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRVC, 4, 0xe08d0000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRVC],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRQS, 4, 0xe08e0000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRQS],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BRAL, 4, 0xe08f0000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BRAL],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_BREAKPOINT, 2, 0xd6730000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_BREAKPOINT],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_BREV, 2, 0x5c900000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_BREV],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_BST, 4, 0xefb00000, 0xfff0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_BST],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_CACHE, 4, 0xf4100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CACHE],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K11],
++ &avr32_ifield_table[AVR32_IFIELD_CACHEOP],
++ },
++ },
++ {
++ AVR32_OPC_CASTS_B, 2, 0x5c600000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CASTS_B],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_CASTS_H, 2, 0x5c800000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CASTS_H],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_CASTU_B, 2, 0x5c500000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CASTU_B],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_CASTU_H, 2, 0x5c700000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CASTU_H],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_CBR, 2, 0xa1c00000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CBR],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_BIT5C],
++ },
++ },
++ {
++ AVR32_OPC_CLZ, 4, 0xe0001200, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_CLZ],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_COM, 2, 0x5cd00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_COM],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_COP, 4, 0xe1a00000, 0xf9f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_COP],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_CRX],
++ &avr32_ifield_table[AVR32_IFIELD_CRY],
++ &avr32_ifield_table[AVR32_IFIELD_CPOP],
++ },
++ },
++ {
++ AVR32_OPC_CP_B, 4, 0xe0001800, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_CP_B],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_CP_H, 4, 0xe0001900, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_CP_H],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_CP_W1, 2, 0x00300000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CP_W1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_CP_W2, 2, 0x58000000, 0xfc000000,
++ &avr32_syntax_table[AVR32_SYNTAX_CP_W2],
++ BFD_RELOC_AVR32_6S, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K6],
++ },
++ },
++ {
++ AVR32_OPC_CP_W3, 4, 0xe0400000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CP_W3],
++ BFD_RELOC_AVR32_21S, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_CPC1, 4, 0xe0001300, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_CPC1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_CPC2, 2, 0x5c200000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_CPC2],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_CSRF, 2, 0xd4030000, 0xfe0f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_CSRF],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K5C],
++ },
++ },
++ {
++ AVR32_OPC_CSRFCZ, 2, 0xd0030000, 0xfe0f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_CSRFCZ],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K5C],
++ },
++ },
++ {
++ AVR32_OPC_DIVS, 4, 0xe0000c00, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_DIVS],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_DIVU, 4, 0xe0000d00, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_DIVU],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_EOR1, 2, 0x00500000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_EOR1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_EOR2, 4, 0xe1e02000, 0xe1f0fe00,
++ &avr32_syntax_table[AVR32_SYNTAX_EOR2],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E2],
++ }
++ },
++ {
++ AVR32_OPC_EOR3, 4, 0xe1e02200, 0xe1f0fe00,
++ &avr32_syntax_table[AVR32_SYNTAX_EOR3],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E2],
++ }
++ },
++ {
++ AVR32_OPC_EORL, 4, 0xec100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_EORL],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_EORH, 4, 0xee100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_EORH],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_FRS, 2, 0xd7430000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_FRS],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_ICALL, 2, 0x5d100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ICALL],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_INCJOSP, 2, 0xd6830000, 0xff8f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_INCJOSP],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K3],
++ },
++ },
++ {
++ AVR32_OPC_LD_D1, 2, 0xa1010000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_D1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_D2, 2, 0xa1100000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_D2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_D3, 2, 0xa1000000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_D3],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_D5, 4, 0xe0000200, 0xe1f0ffc1,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_D5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_D4, 4, 0xe0e00000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_D4],
++ BFD_RELOC_AVR32_16S, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LD_SB2, 4, 0xe0000600, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SB2],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_SB1, 4, 0xe1200000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SB1],
++ BFD_RELOC_AVR32_16S, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LD_UB1, 2, 0x01300000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UB1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_UB2, 2, 0x01700000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UB2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_UB5, 4, 0xe0000700, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UB5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_UB3, 2, 0x01800000, 0xe1800000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UB3],
++ BFD_RELOC_AVR32_3U, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K3],
++ },
++ },
++ {
++ AVR32_OPC_LD_UB4, 4, 0xe1300000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UB4],
++ BFD_RELOC_AVR32_16S, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LD_SH1, 2, 0x01100000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SH1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_SH2, 2, 0x01500000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SH2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_SH5, 4, 0xe0000400, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SH5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_SH3, 2, 0x80000000, 0xe1800000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SH3],
++ BFD_RELOC_AVR32_4UH, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K3],
++ },
++ },
++ {
++ AVR32_OPC_LD_SH4, 4, 0xe1000000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_SH4],
++ BFD_RELOC_AVR32_16S, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LD_UH1, 2, 0x01200000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UH1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_UH2, 2, 0x01600000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UH2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_UH5, 4, 0xe0000500, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UH5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_UH3, 2, 0x80800000, 0xe1800000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UH3],
++ BFD_RELOC_AVR32_4UH, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K3],
++ },
++ },
++ {
++ AVR32_OPC_LD_UH4, 4, 0xe1100000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_UH4],
++ BFD_RELOC_AVR32_16S, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LD_W1, 2, 0x01000000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_W1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_W2, 2, 0x01400000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_W2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_LD_W5, 4, 0xe0000300, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_W5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_W6, 4, 0xe0000f80, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_W6],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LD_W3, 2, 0x60000000, 0xe0000000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_W3],
++ BFD_RELOC_AVR32_7UW, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K5C],
++ },
++ },
++ {
++ AVR32_OPC_LD_W4, 4, 0xe0f00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LD_W4],
++ BFD_RELOC_AVR32_16S, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LDC_D1, 4, 0xe9a01000, 0xfff01100,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC_D1],
++ BFD_RELOC_AVR32_10UW, 4, 3,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_LDC_D2, 4, 0xefa00050, 0xfff011ff,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC_D2],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_LDC_D3, 4, 0xefa01040, 0xfff011c0,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC_D3],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LDC_W1, 4, 0xe9a00000, 0xfff01000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC_W1],
++ BFD_RELOC_AVR32_10UW, 4, 3,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_LDC_W2, 4, 0xefa00040, 0xfff010ff,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC_W2],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_LDC_W3, 4, 0xefa01000, 0xfff010c0,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC_W3],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_LDC0_D, 4, 0xf3a00000, 0xfff00100,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC0_D],
++ BFD_RELOC_AVR32_14UW, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K12CP],
++ },
++ },
++ {
++ AVR32_OPC_LDC0_W, 4, 0xf1a00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDC0_W],
++ BFD_RELOC_AVR32_14UW, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K12CP],
++ },
++ },
++ {
++ AVR32_OPC_LDCM_D, 4, 0xeda00400, 0xfff01f00,
++ &avr32_syntax_table[AVR32_SYNTAX_LDCM_D],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_LDCM_D_PU, 4, 0xeda01400, 0xfff01f00,
++ &avr32_syntax_table[AVR32_SYNTAX_LDCM_D_PU],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_LDCM_W, 4, 0xeda00000, 0xfff01e00,
++ &avr32_syntax_table[AVR32_SYNTAX_LDCM_W],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_CM_HL],
++ },
++ },
++ {
++ AVR32_OPC_LDCM_W_PU, 4, 0xeda01000, 0xfff01e00,
++ &avr32_syntax_table[AVR32_SYNTAX_LDCM_W_PU],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_CM_HL],
++ },
++ },
++ {
++ AVR32_OPC_LDDPC, 2, 0x48000000, 0xf8000000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDDPC],
++ BFD_RELOC_AVR32_9UW_PCREL, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K7C],
++ },
++ },
++ {
++ AVR32_OPC_LDDPC_EXT, 4, 0xfef00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDDPC_EXT],
++ BFD_RELOC_AVR32_16B_PCREL, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LDDSP, 2, 0x40000000, 0xf8000000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDDSP],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K7C],
++ },
++ },
++ {
++ AVR32_OPC_LDINS_B, 4, 0xe1d04000, 0xe1f0c000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDINS_B],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_PART2_K12],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ },
++ },
++ {
++ AVR32_OPC_LDINS_H, 4, 0xe1d00000, 0xe1f0e000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDINS_H],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_PART1_K12],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ },
++ },
++ {
++ AVR32_OPC_LDM, 4, 0xe1c00000, 0xfdf00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDM],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_W],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LDMTS, 4, 0xe5c00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDMTS],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LDMTS_PU, 4, 0xe7c00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDMTS_PU],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_LDSWP_SH, 4, 0xe1d02000, 0xe1f0f000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDSWP_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ },
++ },
++ {
++ AVR32_OPC_LDSWP_UH, 4, 0xe1d03000, 0xe1f0f000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDSWP_UH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ },
++ },
++ {
++ AVR32_OPC_LDSWP_W, 4, 0xe1d08000, 0xe1f0f000,
++ &avr32_syntax_table[AVR32_SYNTAX_LDSWP_W],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ },
++ },
++ {
++ AVR32_OPC_LSL1, 4, 0xe0000940, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_LSL1],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_LSL3, 4, 0xe0001500, 0xe1f0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_LSL3],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_LSL2, 2, 0xa1600000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LSL2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_BIT5C],
++ },
++ },
++ {
++ AVR32_OPC_LSR1, 4, 0xe0000a40, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_LSR1],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_LSR3, 4, 0xe0001600, 0xe1f0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_LSR3],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_LSR2, 2, 0xa1800000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_LSR2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_BIT5C],
++ },
++ },
++ {
++ AVR32_OPC_MAC, 4, 0xe0000340, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_MAC],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MACHH_D, 4, 0xe0000580, 0xe1f0ffc1,
++ &avr32_syntax_table[AVR32_SYNTAX_MACHH_D],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MACHH_W, 4, 0xe0000480, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MACHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MACS_D, 4, 0xe0000540, 0xe1f0fff1,
++ &avr32_syntax_table[AVR32_SYNTAX_MACS_D],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MACSATHH_W, 4, 0xe0000680, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MACSATHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MACUD, 4, 0xe0000740, 0xe1f0fff1,
++ &avr32_syntax_table[AVR32_SYNTAX_MACUD],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MACWH_D, 4, 0xe0000c80, 0xe1f0ffe1,
++ &avr32_syntax_table[AVR32_SYNTAX_MACWH_D],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MAX, 4, 0xe0000c40, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_MAX],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MCALL, 4, 0xf0100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MCALL],
++ BFD_RELOC_AVR32_18W_PCREL, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_MFDR, 4, 0xe5b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MFDR],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MFSR, 4, 0xe1b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MFSR],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MIN, 4, 0xe0000d40, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_MIN],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MOV3, 2, 0x00900000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MOV3],
++ BFD_RELOC_NONE, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOV1, 2, 0x30000000, 0xf0000000,
++ &avr32_syntax_table[AVR32_SYNTAX_MOV1],
++ BFD_RELOC_AVR32_8S, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_MOV2, 4, 0xe0600000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MOV2],
++ BFD_RELOC_AVR32_21S, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_MOVEQ1, 4, 0xe0001700, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVEQ1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVNE1, 4, 0xe0001710, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVNE1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVCC1, 4, 0xe0001720, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVHS1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVCS1, 4, 0xe0001730, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLO1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVGE1, 4, 0xe0001740, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVGE1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVLT1, 4, 0xe0001750, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLT1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVMI1, 4, 0xe0001760, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVMI1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVPL1, 4, 0xe0001770, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVPL1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVLS1, 4, 0xe0001780, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLS1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVGT1, 4, 0xe0001790, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVGT1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVLE1, 4, 0xe00017a0, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLE1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVHI1, 4, 0xe00017b0, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVHI1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVVS1, 4, 0xe00017c0, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVVS1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVVC1, 4, 0xe00017d0, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVVC1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVQS1, 4, 0xe00017e0, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVQS1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVAL1, 4, 0xe00017f0, 0xe1f0ffff,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVAL1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MOVEQ2, 4, 0xf9b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVEQ2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVNE2, 4, 0xf9b00100, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVNE2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVCC2, 4, 0xf9b00200, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVHS2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVCS2, 4, 0xf9b00300, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLO2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVGE2, 4, 0xf9b00400, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVGE2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVLT2, 4, 0xf9b00500, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLT2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVMI2, 4, 0xf9b00600, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVMI2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVPL2, 4, 0xf9b00700, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVPL2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVLS2, 4, 0xf9b00800, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLS2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVGT2, 4, 0xf9b00900, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVGT2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVLE2, 4, 0xf9b00a00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVLE2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVHI2, 4, 0xf9b00b00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVHI2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVVS2, 4, 0xf9b00c00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVVS2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVVC2, 4, 0xf9b00d00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVVC2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVQS2, 4, 0xf9b00e00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVQS2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MOVAL2, 4, 0xf9b00f00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVAL2],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MTDR, 4, 0xe7b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MTDR],
++ BFD_RELOC_AVR32_8S_EXT, 2, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MTSR, 4, 0xe3b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MTSR],
++ BFD_RELOC_AVR32_8S_EXT, 2, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MUL1, 2, 0xa1300000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MUL1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_MUL2, 4, 0xe0000240, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_MUL2],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MUL3, 4, 0xe0001000, 0xe1f0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_MUL3],
++ BFD_RELOC_AVR32_8S_EXT, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_MULHH_W, 4, 0xe0000780, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULNHH_W, 4, 0xe0000180, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULNHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULNWH_D, 4, 0xe0000280, 0xe1f0ffe1,
++ &avr32_syntax_table[AVR32_SYNTAX_MULNWH_D],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULSD, 4, 0xe0000440, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULSD],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MULSATHH_H, 4, 0xe0000880, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULSATHH_H],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULSATHH_W, 4, 0xe0000980, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULSATHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULSATRNDHH_H, 4, 0xe0000a80, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULSATRNDHH_H],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULSATRNDWH_W, 4, 0xe0000b80, 0xe1f0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULSATRNDWH_W],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULSATWH_W, 4, 0xe0000e80, 0xe1f0ffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_MULSATWH_W],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MULU_D, 4, 0xe0000640, 0xe1f0fff1,
++ &avr32_syntax_table[AVR32_SYNTAX_MULU_D],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MULWH_D, 4, 0xe0000d80, 0xe1f0ffe1,
++ &avr32_syntax_table[AVR32_SYNTAX_MULWH_D],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_MUSFR, 2, 0x5d300000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MUSFR],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_MUSTR, 2, 0x5d200000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MUSTR],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_MVCR_D, 4, 0xefa00010, 0xfff111ff,
++ &avr32_syntax_table[AVR32_SYNTAX_MVCR_D],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ },
++ },
++ {
++ AVR32_OPC_MVCR_W, 4, 0xefa00000, 0xfff010ff,
++ &avr32_syntax_table[AVR32_SYNTAX_MVCR_W],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ },
++ },
++ {
++ AVR32_OPC_MVRC_D, 4, 0xefa00030, 0xfff111ff,
++ &avr32_syntax_table[AVR32_SYNTAX_MVRC_D],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ },
++ },
++ {
++ AVR32_OPC_MVRC_W, 4, 0xefa00020, 0xfff010ff,
++ &avr32_syntax_table[AVR32_SYNTAX_MVRC_W],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_NEG, 2, 0x5c300000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_NEG],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_NOP, 2, 0xd7030000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_NOP],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_OR1, 2, 0x00400000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_OR1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_OR2, 4, 0xe1e01000, 0xe1f0fe00,
++ &avr32_syntax_table[AVR32_SYNTAX_OR2],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E2],
++ },
++ },
++ {
++ AVR32_OPC_OR3, 4, 0xe1e01200, 0xe1f0fe00,
++ &avr32_syntax_table[AVR32_SYNTAX_OR3],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E2],
++ },
++ },
++ {
++ AVR32_OPC_ORH, 4, 0xea100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ORH],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_ORL, 4, 0xe8100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ORL],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_PABS_SB, 4, 0xe00023e0, 0xfff0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PABS_SB],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PABS_SH, 4, 0xe00023f0, 0xfff0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PABS_SH],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PACKSH_SB, 4, 0xe00024d0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PACKSH_SB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PACKSH_UB, 4, 0xe00024c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PACKSH_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PACKW_SH, 4, 0xe0002470, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PACKW_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADD_B, 4, 0xe0002300, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADD_B],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADD_H, 4, 0xe0002000, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADD_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDH_SH, 4, 0xe00020c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDH_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDH_UB, 4, 0xe0002360, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDH_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDS_SB, 4, 0xe0002320, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDS_SB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDS_SH, 4, 0xe0002040, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDS_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDS_UB, 4, 0xe0002340, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDS_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDS_UH, 4, 0xe0002080, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDS_UH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDSUB_H, 4, 0xe0002100, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDSUB_H],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PADDSUBH_SH, 4, 0xe0002280, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDSUBH_SH],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PADDSUBS_SH, 4, 0xe0002180, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDSUBS_SH],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PADDSUBS_UH, 4, 0xe0002200, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDSUBS_UH],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PADDX_H, 4, 0xe0002020, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDX_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDXH_SH, 4, 0xe00020e0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDXH_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDXS_SH, 4, 0xe0002060, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDXS_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PADDXS_UH, 4, 0xe00020a0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PADDXS_UH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PASR_B, 4, 0xe0002410, 0xe1f8fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PASR_B],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_COND3],
++ },
++ },
++ {
++ AVR32_OPC_PASR_H, 4, 0xe0002440, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PASR_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PAVG_SH, 4, 0xe00023d0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PAVG_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PAVG_UB, 4, 0xe00023c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PAVG_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PLSL_B, 4, 0xe0002420, 0xe1f8fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PLSL_B],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_COND3],
++ },
++ },
++ {
++ AVR32_OPC_PLSL_H, 4, 0xe0002450, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PLSL_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PLSR_B, 4, 0xe0002430, 0xe1f8fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PLSR_B],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_COND3],
++ },
++ },
++ {
++ AVR32_OPC_PLSR_H, 4, 0xe0002460, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PLSR_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PMAX_SH, 4, 0xe0002390, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PMAX_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PMAX_UB, 4, 0xe0002380, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PMAX_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PMIN_SH, 4, 0xe00023b0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PMIN_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PMIN_UB, 4, 0xe00023a0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PMIN_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_POPJC, 2, 0xd7130000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_POPJC],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_POPM, 2, 0xd0020000, 0xf0070000,
++ &avr32_syntax_table[AVR32_SYNTAX_POPM],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_POPM],
++ },
++ },
++ {
++ AVR32_OPC_POPM_E, 4, 0xe3cd0000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_POPM_E],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_PREF, 4, 0xf2100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_PREF],
++ BFD_RELOC_AVR32_16S, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_PSAD, 4, 0xe0002400, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSAD],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUB_B, 4, 0xe0002310, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUB_B],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUB_H, 4, 0xe0002010, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUB_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBADD_H, 4, 0xe0002140, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBADD_H],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PSUBADDH_SH, 4, 0xe00022c0, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBADDH_SH],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PSUBADDS_SH, 4, 0xe00021c0, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBADDS_SH],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PSUBADDS_UH, 4, 0xe0002240, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBADDS_UH],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PSUBH_SH, 4, 0xe00020d0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBH_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBH_UB, 4, 0xe0002370, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBH_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBS_SB, 4, 0xe0002330, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBS_SB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBS_SH, 4, 0xe0002050, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBS_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBS_UB, 4, 0xe0002350, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBS_UB],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBS_UH, 4, 0xe0002090, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBS_UH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBX_H, 4, 0xe0002030, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBX_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBXH_SH, 4, 0xe00020f0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBXH_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBXS_SH, 4, 0xe0002070, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBXS_SH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PSUBXS_UH, 4, 0xe00020b0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_PSUBXS_UH],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_PUNPCKSB_H, 4, 0xe00024a0, 0xe1ffffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_PUNPCKSB_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PUNPCKUB_H, 4, 0xe0002480, 0xe1ffffe0,
++ &avr32_syntax_table[AVR32_SYNTAX_PUNPCKUB_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_PUSHJC, 2, 0xd7230000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_PUSHJC],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_PUSHM, 2, 0xd0010000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_PUSHM],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_PUSHM_E, 4, 0xebcd0000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_PUSHM_E],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_RCALL1, 2, 0xc00c0000, 0xf00c0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RCALL1],
++ BFD_RELOC_AVR32_11H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K10],
++ },
++ },
++ {
++ AVR32_OPC_RCALL2, 4, 0xe0a00000, 0xe1ef0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RCALL2],
++ BFD_RELOC_AVR32_22H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_RETEQ, 2, 0x5e000000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETEQ],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETNE, 2, 0x5e100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETNE],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETCC, 2, 0x5e200000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETHS],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETCS, 2, 0x5e300000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETLO],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETGE, 2, 0x5e400000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETGE],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETLT, 2, 0x5e500000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETLT],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETMI, 2, 0x5e600000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETMI],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETPL, 2, 0x5e700000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETPL],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETLS, 2, 0x5e800000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETLS],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETGT, 2, 0x5e900000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETGT],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETLE, 2, 0x5ea00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETLE],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETHI, 2, 0x5eb00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETHI],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETVS, 2, 0x5ec00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETVS],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETVC, 2, 0x5ed00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETVC],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETQS, 2, 0x5ee00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETQS],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETAL, 2, 0x5ef00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETAL],
++ BFD_RELOC_NONE, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_RETD, 2, 0xd6230000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETD],
++ BFD_RELOC_NONE, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_RETE, 2, 0xd6030000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETE],
++ BFD_RELOC_NONE, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_RETJ, 2, 0xd6330000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETJ],
++ BFD_RELOC_NONE, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_RETS, 2, 0xd6130000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETS],
++ BFD_RELOC_NONE, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_RJMP, 2, 0xc0080000, 0xf00c0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RJMP],
++ BFD_RELOC_AVR32_11H_PCREL, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K10],
++ },
++ },
++ {
++ AVR32_OPC_ROL, 2, 0x5cf00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ROL],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_ROR, 2, 0x5d000000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ROR],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_RSUB1, 2, 0x00200000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_RSUB1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_RSUB2, 4, 0xe0001100, 0xe1f0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_RSUB2],
++ BFD_RELOC_AVR32_8S_EXT, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SATADD_H, 4, 0xe00002c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_SATADD_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SATADD_W, 4, 0xe00000c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_SATADD_W],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SATRNDS, 4, 0xf3b00000, 0xfff0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_SATRNDS],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ },
++ },
++ {
++ AVR32_OPC_SATRNDU, 4, 0xf3b00400, 0xfff0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_SATRNDU],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ },
++ },
++ {
++ AVR32_OPC_SATS, 4, 0xf1b00000, 0xfff0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_SATS],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ },
++ },
++ {
++ AVR32_OPC_SATSUB_H, 4, 0xe00003c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_SATSUB_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SATSUB_W1, 4, 0xe00001c0, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_SATSUB_W1],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SATSUB_W2, 4, 0xe0d00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SATSUB_W2],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_SATU, 4, 0xf1b00400, 0xfff0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_SATU],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ },
++ },
++ {
++ AVR32_OPC_SBC, 4, 0xe0000140, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_SBC],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SBR, 2, 0xa1a00000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SBR],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_BIT5C],
++ },
++ },
++ {
++ AVR32_OPC_SCALL, 2, 0xd7330000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_SCALL],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_SCR, 2, 0x5c100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SCR],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SLEEP, 4, 0xe9b00000, 0xffffff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SLEEP],
++ BFD_RELOC_AVR32_8S_EXT, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SREQ, 2, 0x5f000000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SREQ],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRNE, 2, 0x5f100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRNE],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRCC, 2, 0x5f200000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRHS],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRCS, 2, 0x5f300000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRLO],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRGE, 2, 0x5f400000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRGE],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRLT, 2, 0x5f500000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRLT],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRMI, 2, 0x5f600000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRMI],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRPL, 2, 0x5f700000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRPL],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRLS, 2, 0x5f800000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRLS],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRGT, 2, 0x5f900000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRGT],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRLE, 2, 0x5fa00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRLE],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRHI, 2, 0x5fb00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRHI],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRVS, 2, 0x5fc00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRVS],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRVC, 2, 0x5fd00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRVC],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRQS, 2, 0x5fe00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRQS],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SRAL, 2, 0x5ff00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SRAL],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SSRF, 2, 0xd2030000, 0xfe0f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_SSRF],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K5C],
++ },
++ },
++ {
++ AVR32_OPC_ST_B1, 2, 0x00c00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_B1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_B2, 2, 0x00f00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_B2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_B5, 4, 0xe0000b00, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_B5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ },
++ },
++ {
++ AVR32_OPC_ST_B3, 2, 0xa0800000, 0xe1800000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_B3],
++ BFD_RELOC_AVR32_3U, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K3],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_B4, 4, 0xe1600000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_B4],
++ BFD_RELOC_AVR32_16S, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_D1, 2, 0xa1200000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_D1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ },
++ },
++ {
++ AVR32_OPC_ST_D2, 2, 0xa1210000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_D2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ },
++ },
++ {
++ AVR32_OPC_ST_D3, 2, 0xa1110000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_D3],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ },
++ },
++ {
++ AVR32_OPC_ST_D5, 4, 0xe0000800, 0xe1f0ffc1,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_D5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_RD_DW],
++ },
++ },
++ {
++ AVR32_OPC_ST_D4, 4, 0xe0e10000, 0xe1f10000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_D4],
++ BFD_RELOC_AVR32_16S, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ &avr32_ifield_table[AVR32_IFIELD_RY_DW],
++ },
++ },
++ {
++ AVR32_OPC_ST_H1, 2, 0x00b00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_H1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_H2, 2, 0x00e00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_H2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_H5, 4, 0xe0000a00, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_H5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ },
++ },
++ {
++ AVR32_OPC_ST_H3, 2, 0xa0000000, 0xe1800000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_H3],
++ BFD_RELOC_AVR32_4UH, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K3],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_H4, 4, 0xe1500000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_H4],
++ BFD_RELOC_AVR32_16S, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_W1, 2, 0x00a00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_W1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_W2, 2, 0x00d00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_W2],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_W5, 4, 0xe0000900, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_W5],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ },
++ },
++ {
++ AVR32_OPC_ST_W3, 2, 0x81000000, 0xe1000000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_W3],
++ BFD_RELOC_AVR32_6UW, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K4],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_ST_W4, 4, 0xe1400000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_ST_W4],
++ BFD_RELOC_AVR32_16S, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_STC_D1, 4, 0xeba01000, 0xfff01100,
++ &avr32_syntax_table[AVR32_SYNTAX_STC_D1],
++ BFD_RELOC_AVR32_10UW, 4, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ },
++ },
++ {
++ AVR32_OPC_STC_D2, 4, 0xefa00070, 0xfff011f0,
++ &avr32_syntax_table[AVR32_SYNTAX_STC_D2],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ },
++ },
++ {
++ AVR32_OPC_STC_D3, 4, 0xefa010c0, 0xfff011c0,
++ &avr32_syntax_table[AVR32_SYNTAX_STC_D3],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ },
++ },
++ {
++ AVR32_OPC_STC_W1, 4, 0xeba00000, 0xfff01000,
++ &avr32_syntax_table[AVR32_SYNTAX_STC_W1],
++ BFD_RELOC_AVR32_10UW, 4, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ },
++ },
++ {
++ AVR32_OPC_STC_W2, 4, 0xefa00060, 0xfff010ff,
++ &avr32_syntax_table[AVR32_SYNTAX_STC_W2],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ },
++ },
++ {
++ AVR32_OPC_STC_W3, 4, 0xefa01080, 0xfff010c0,
++ &avr32_syntax_table[AVR32_SYNTAX_STC_W3],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ },
++ },
++ {
++ AVR32_OPC_STC0_D, 4, 0xf7a00000, 0xfff00100,
++ &avr32_syntax_table[AVR32_SYNTAX_STC0_D],
++ BFD_RELOC_AVR32_14UW, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K12CP],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_DW],
++ },
++ },
++ {
++ AVR32_OPC_STC0_W, 4, 0xf5a00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_STC0_W],
++ BFD_RELOC_AVR32_14UW, 3, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K12CP],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ },
++ },
++ {
++ AVR32_OPC_STCM_D, 4, 0xeda00500, 0xfff01f00,
++ &avr32_syntax_table[AVR32_SYNTAX_STCM_D],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_STCM_D_PU, 4, 0xeda01500, 0xfff01f00,
++ &avr32_syntax_table[AVR32_SYNTAX_STCM_D_PU],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_STCM_W, 4, 0xeda00200, 0xfff01e00,
++ &avr32_syntax_table[AVR32_SYNTAX_STCM_W],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_CM_HL],
++ },
++ },
++ {
++ AVR32_OPC_STCM_W_PU, 4, 0xeda01200, 0xfff01e00,
++ &avr32_syntax_table[AVR32_SYNTAX_STCM_W_PU],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_CPNO],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ &avr32_ifield_table[AVR32_IFIELD_CM_HL],
++ },
++ },
++ {
++ AVR32_OPC_STCOND, 4, 0xe1700000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_STCOND],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_STDSP, 2, 0x50000000, 0xf8000000,
++ &avr32_syntax_table[AVR32_SYNTAX_STDSP],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K7C],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_STHH_W2, 4, 0xe1e08000, 0xe1f0c0c0,
++ &avr32_syntax_table[AVR32_SYNTAX_STHH_W2],
++ BFD_RELOC_UNUSED, 7, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_CRD_RI],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X2],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y2],
++ },
++ },
++ {
++ AVR32_OPC_STHH_W1, 4, 0xe1e0c000, 0xe1f0c000,
++ &avr32_syntax_table[AVR32_SYNTAX_STHH_W1],
++ BFD_RELOC_AVR32_STHH_W, 6, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_K8E2],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X2],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y2],
++ },
++ },
++ {
++ AVR32_OPC_STM, 4, 0xe9c00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_STM],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_STM_PU, 4, 0xebc00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_STM_PU],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_STMTS, 4, 0xedc00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_STMTS],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_STMTS_PU, 4, 0xefc00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_STMTS_PU],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_STSWP_H, 4, 0xe1d09000, 0xe1f0f000,
++ &avr32_syntax_table[AVR32_SYNTAX_STSWP_H],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_STSWP_W, 4, 0xe1d0a000, 0xe1f0f000,
++ &avr32_syntax_table[AVR32_SYNTAX_STSWP_W],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K12],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_SUB1, 2, 0x00100000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SUB1],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_SUB2, 4, 0xe0000100, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_SUB2],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K2],
++ },
++ },
++ {
++ AVR32_OPC_SUB5, 4, 0xe0c00000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SUB5],
++ BFD_RELOC_AVR32_SUB5, 3, 2,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_SUB3_SP, 2, 0x200d0000, 0xf00f0000,
++ &avr32_syntax_table[AVR32_SYNTAX_SUB3_SP],
++ BFD_RELOC_AVR32_10SW, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_SUB3, 2, 0x20000000, 0xf0000000,
++ &avr32_syntax_table[AVR32_SYNTAX_SUB3],
++ BFD_RELOC_AVR32_8S, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8C],
++ },
++ },
++ {
++ AVR32_OPC_SUB4, 4, 0xe0200000, 0xe1e00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SUB4],
++ BFD_RELOC_AVR32_21S, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K21],
++ },
++ },
++ {
++ AVR32_OPC_SUBEQ, 4, 0xf7b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBEQ],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBNE, 4, 0xf7b00100, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBNE],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBCC, 4, 0xf7b00200, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBHS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBCS, 4, 0xf7b00300, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBLO],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBGE, 4, 0xf7b00400, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBGE],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBLT, 4, 0xf7b00500, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBLT],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBMI, 4, 0xf7b00600, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBMI],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBPL, 4, 0xf7b00700, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBPL],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBLS, 4, 0xf7b00800, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBLS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBGT, 4, 0xf7b00900, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBGT],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBLE, 4, 0xf7b00a00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBLE],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBHI, 4, 0xf7b00b00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBHI],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBVS, 4, 0xf7b00c00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBVS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBVC, 4, 0xf7b00d00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBVC],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBQS, 4, 0xf7b00e00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBQS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBAL, 4, 0xf7b00f00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBAL],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFEQ, 4, 0xf5b00000, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFEQ],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFNE, 4, 0xf5b00100, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFNE],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFCC, 4, 0xf5b00200, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFHS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFCS, 4, 0xf5b00300, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFLO],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFGE, 4, 0xf5b00400, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFGE],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFLT, 4, 0xf5b00500, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFLT],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFMI, 4, 0xf5b00600, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFMI],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFPL, 4, 0xf5b00700, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFPL],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFLS, 4, 0xf5b00800, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFLS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFGT, 4, 0xf5b00900, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFGT],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFLE, 4, 0xf5b00a00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFLE],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFHI, 4, 0xf5b00b00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFHI],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFVS, 4, 0xf5b00c00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFVS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFVC, 4, 0xf5b00d00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFVC],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFQS, 4, 0xf5b00e00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFQS],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBFAL, 4, 0xf5b00f00, 0xfff0ff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBFAL],
++ BFD_RELOC_AVR32_8S_EXT, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ },
++ },
++ {
++ AVR32_OPC_SUBHH_W, 4, 0xe0000f00, 0xe1f0ffc0,
++ &avr32_syntax_table[AVR32_SYNTAX_SUBHH_W],
++ BFD_RELOC_UNUSED, 5, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_X],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_Y],
++ },
++ },
++ {
++ AVR32_OPC_SWAP_B, 2, 0x5cb00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SWAP_B],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_SWAP_BH, 2, 0x5cc00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SWAP_BH],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_SWAP_H, 2, 0x5ca00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_SWAP_H],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_SYNC, 4, 0xebb00000, 0xffffff00,
++ &avr32_syntax_table[AVR32_SYNTAX_SYNC],
++ BFD_RELOC_AVR32_8S_EXT, 1, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_K8E],
++ }
++ },
++ {
++ AVR32_OPC_TLBR, 2, 0xd6430000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_TLBR],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_TLBS, 2, 0xd6530000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_TLBS],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_TLBW, 2, 0xd6630000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_TLBW],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_TNBZ, 2, 0x5ce00000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_TNBZ],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ }
++ },
++ {
++ AVR32_OPC_TST, 2, 0x00700000, 0xe1f00000,
++ &avr32_syntax_table[AVR32_SYNTAX_TST],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ },
++ },
++ {
++ AVR32_OPC_XCHG, 4, 0xe0000b40, 0xe1f0fff0,
++ &avr32_syntax_table[AVR32_SYNTAX_XCHG],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RD_E],
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ },
++ },
++ {
++ AVR32_OPC_MEMC, 4, 0xf6100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MEMC],
++ BFD_RELOC_AVR32_15S, 2, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_MEM15],
++ &avr32_ifield_table[AVR32_IFIELD_MEMB5],
++ },
++ },
++ {
++ AVR32_OPC_MEMS, 4, 0xf8100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MEMS],
++ BFD_RELOC_AVR32_15S, 2, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_MEM15],
++ &avr32_ifield_table[AVR32_IFIELD_MEMB5],
++ },
++ },
++ {
++ AVR32_OPC_MEMT, 4, 0xfa100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MEMT],
++ BFD_RELOC_AVR32_15S, 2, 0,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_MEM15],
++ &avr32_ifield_table[AVR32_IFIELD_MEMB5],
++ },
++ },
++ {
++ AVR32_OPC_BFEXTS, 4, 0xe1d0b000, 0xe1f0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_BFEXTS],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_BFEXTU, 4, 0xe1d0c000, 0xe1f0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_BFEXTU],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++ {
++ AVR32_OPC_BFINS, 4, 0xe1d0d000, 0xe1f0fc00,
++ &avr32_syntax_table[AVR32_SYNTAX_BFINS],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RX],
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_S5],
++ &avr32_ifield_table[AVR32_IFIELD_K5E],
++ },
++ },
++#define AVR32_OPCODE_RSUBCOND(cond_name, cond_field) \
++ { \
++ AVR32_OPC_RSUB ## cond_name , 4, \
++ 0xfbb00000 | (cond_field << 8), 0xfff0ff00, \
++ &avr32_syntax_table[AVR32_SYNTAX_RSUB ## cond_name ], \
++ BFD_RELOC_AVR32_8S_EXT, 2, 1, \
++ { \
++ &avr32_ifield_table[AVR32_IFIELD_RY], \
++ &avr32_ifield_table[AVR32_IFIELD_K8E], \
++ }, \
++ },
++
++ AVR32_OPCODE_RSUBCOND (EQ, 0)
++ AVR32_OPCODE_RSUBCOND (NE, 1)
++ AVR32_OPCODE_RSUBCOND (CC, 2)
++ AVR32_OPCODE_RSUBCOND (CS, 3)
++ AVR32_OPCODE_RSUBCOND (GE, 4)
++ AVR32_OPCODE_RSUBCOND (LT, 5)
++ AVR32_OPCODE_RSUBCOND (MI, 6)
++ AVR32_OPCODE_RSUBCOND (PL, 7)
++ AVR32_OPCODE_RSUBCOND (LS, 8)
++ AVR32_OPCODE_RSUBCOND (GT, 9)
++ AVR32_OPCODE_RSUBCOND (LE, 10)
++ AVR32_OPCODE_RSUBCOND (HI, 11)
++ AVR32_OPCODE_RSUBCOND (VS, 12)
++ AVR32_OPCODE_RSUBCOND (VC, 13)
++ AVR32_OPCODE_RSUBCOND (QS, 14)
++ AVR32_OPCODE_RSUBCOND (AL, 15)
++
++#define AVR32_OPCODE_OP3_COND(op_name, op_field, cond_name, cond_field) \
++ { \
++ AVR32_OPC_ ## op_name ## cond_name , 4, \
++ 0xe1d0e000 | (cond_field << 8) | (op_field << 4), 0xe1f0fff0, \
++ &avr32_syntax_table[AVR32_SYNTAX_ ## op_name ## cond_name ], \
++ BFD_RELOC_UNUSED, 3, -1, \
++ { \
++ &avr32_ifield_table[AVR32_IFIELD_RD_E], \
++ &avr32_ifield_table[AVR32_IFIELD_RX], \
++ &avr32_ifield_table[AVR32_IFIELD_RY], \
++ }, \
++ },
++
++ AVR32_OPCODE_OP3_COND (ADD, 0, EQ, 0)
++ AVR32_OPCODE_OP3_COND (ADD, 0, NE, 1)
++ AVR32_OPCODE_OP3_COND (ADD, 0, CC, 2)
++ AVR32_OPCODE_OP3_COND (ADD, 0, CS, 3)
++ AVR32_OPCODE_OP3_COND (ADD, 0, GE, 4)
++ AVR32_OPCODE_OP3_COND (ADD, 0, LT, 5)
++ AVR32_OPCODE_OP3_COND (ADD, 0, MI, 6)
++ AVR32_OPCODE_OP3_COND (ADD, 0, PL, 7)
++ AVR32_OPCODE_OP3_COND (ADD, 0, LS, 8)
++ AVR32_OPCODE_OP3_COND (ADD, 0, GT, 9)
++ AVR32_OPCODE_OP3_COND (ADD, 0, LE, 10)
++ AVR32_OPCODE_OP3_COND (ADD, 0, HI, 11)
++ AVR32_OPCODE_OP3_COND (ADD, 0, VS, 12)
++ AVR32_OPCODE_OP3_COND (ADD, 0, VC, 13)
++ AVR32_OPCODE_OP3_COND (ADD, 0, QS, 14)
++ AVR32_OPCODE_OP3_COND (ADD, 0, AL, 15)
++
++ AVR32_OPCODE_OP3_COND (SUB2, 1, EQ, 0)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, NE, 1)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, CC, 2)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, CS, 3)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, GE, 4)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, LT, 5)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, MI, 6)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, PL, 7)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, LS, 8)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, GT, 9)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, LE, 10)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, HI, 11)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, VS, 12)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, VC, 13)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, QS, 14)
++ AVR32_OPCODE_OP3_COND (SUB2, 1, AL, 15)
++
++ AVR32_OPCODE_OP3_COND (AND, 2, EQ, 0)
++ AVR32_OPCODE_OP3_COND (AND, 2, NE, 1)
++ AVR32_OPCODE_OP3_COND (AND, 2, CC, 2)
++ AVR32_OPCODE_OP3_COND (AND, 2, CS, 3)
++ AVR32_OPCODE_OP3_COND (AND, 2, GE, 4)
++ AVR32_OPCODE_OP3_COND (AND, 2, LT, 5)
++ AVR32_OPCODE_OP3_COND (AND, 2, MI, 6)
++ AVR32_OPCODE_OP3_COND (AND, 2, PL, 7)
++ AVR32_OPCODE_OP3_COND (AND, 2, LS, 8)
++ AVR32_OPCODE_OP3_COND (AND, 2, GT, 9)
++ AVR32_OPCODE_OP3_COND (AND, 2, LE, 10)
++ AVR32_OPCODE_OP3_COND (AND, 2, HI, 11)
++ AVR32_OPCODE_OP3_COND (AND, 2, VS, 12)
++ AVR32_OPCODE_OP3_COND (AND, 2, VC, 13)
++ AVR32_OPCODE_OP3_COND (AND, 2, QS, 14)
++ AVR32_OPCODE_OP3_COND (AND, 2, AL, 15)
++
++ AVR32_OPCODE_OP3_COND (OR, 3, EQ, 0)
++ AVR32_OPCODE_OP3_COND (OR, 3, NE, 1)
++ AVR32_OPCODE_OP3_COND (OR, 3, CC, 2)
++ AVR32_OPCODE_OP3_COND (OR, 3, CS, 3)
++ AVR32_OPCODE_OP3_COND (OR, 3, GE, 4)
++ AVR32_OPCODE_OP3_COND (OR, 3, LT, 5)
++ AVR32_OPCODE_OP3_COND (OR, 3, MI, 6)
++ AVR32_OPCODE_OP3_COND (OR, 3, PL, 7)
++ AVR32_OPCODE_OP3_COND (OR, 3, LS, 8)
++ AVR32_OPCODE_OP3_COND (OR, 3, GT, 9)
++ AVR32_OPCODE_OP3_COND (OR, 3, LE, 10)
++ AVR32_OPCODE_OP3_COND (OR, 3, HI, 11)
++ AVR32_OPCODE_OP3_COND (OR, 3, VS, 12)
++ AVR32_OPCODE_OP3_COND (OR, 3, VC, 13)
++ AVR32_OPCODE_OP3_COND (OR, 3, QS, 14)
++ AVR32_OPCODE_OP3_COND (OR, 3, AL, 15)
++
++ AVR32_OPCODE_OP3_COND (EOR, 4, EQ, 0)
++ AVR32_OPCODE_OP3_COND (EOR, 4, NE, 1)
++ AVR32_OPCODE_OP3_COND (EOR, 4, CC, 2)
++ AVR32_OPCODE_OP3_COND (EOR, 4, CS, 3)
++ AVR32_OPCODE_OP3_COND (EOR, 4, GE, 4)
++ AVR32_OPCODE_OP3_COND (EOR, 4, LT, 5)
++ AVR32_OPCODE_OP3_COND (EOR, 4, MI, 6)
++ AVR32_OPCODE_OP3_COND (EOR, 4, PL, 7)
++ AVR32_OPCODE_OP3_COND (EOR, 4, LS, 8)
++ AVR32_OPCODE_OP3_COND (EOR, 4, GT, 9)
++ AVR32_OPCODE_OP3_COND (EOR, 4, LE, 10)
++ AVR32_OPCODE_OP3_COND (EOR, 4, HI, 11)
++ AVR32_OPCODE_OP3_COND (EOR, 4, VS, 12)
++ AVR32_OPCODE_OP3_COND (EOR, 4, VC, 13)
++ AVR32_OPCODE_OP3_COND (EOR, 4, QS, 14)
++ AVR32_OPCODE_OP3_COND (EOR, 4, AL, 15)
++
++#define AVR32_OPCODE_LD_COND(op_name, op_field, cond_name, cond_field) \
++ { \
++ AVR32_OPC_ ## op_name ## cond_name , 4, \
++ 0xe1f00000 | (cond_field << 12) | (op_field << 9), 0xe1f0fe00, \
++ &avr32_syntax_table[AVR32_SYNTAX_ ## op_name ## cond_name ], \
++ BFD_RELOC_UNUSED, 3, -1, \
++ { \
++ &avr32_ifield_table[AVR32_IFIELD_RY], \
++ &avr32_ifield_table[AVR32_IFIELD_RX], \
++ &avr32_ifield_table[AVR32_IFIELD_K9E], \
++ }, \
++ },
++
++#define AVR32_OPCODE_ST_COND(op_name, op_field, cond_name, cond_field) \
++ { \
++ AVR32_OPC_ ## op_name ## cond_name , 4, \
++ 0xe1f00000 | (cond_field << 12) | (op_field << 9), 0xe1f0fe00, \
++ &avr32_syntax_table[AVR32_SYNTAX_ ## op_name ## cond_name ], \
++ BFD_RELOC_UNUSED, 3, -1, \
++ { \
++ &avr32_ifield_table[AVR32_IFIELD_RX], \
++ &avr32_ifield_table[AVR32_IFIELD_K9E], \
++ &avr32_ifield_table[AVR32_IFIELD_RY], \
++ }, \
++ },
++
++ AVR32_OPCODE_LD_COND (LD_W, 0, EQ, 0)
++ AVR32_OPCODE_LD_COND (LD_W, 0, NE, 1)
++ AVR32_OPCODE_LD_COND (LD_W, 0, CC, 2)
++ AVR32_OPCODE_LD_COND (LD_W, 0, CS, 3)
++ AVR32_OPCODE_LD_COND (LD_W, 0, GE, 4)
++ AVR32_OPCODE_LD_COND (LD_W, 0, LT, 5)
++ AVR32_OPCODE_LD_COND (LD_W, 0, MI, 6)
++ AVR32_OPCODE_LD_COND (LD_W, 0, PL, 7)
++ AVR32_OPCODE_LD_COND (LD_W, 0, LS, 8)
++ AVR32_OPCODE_LD_COND (LD_W, 0, GT, 9)
++ AVR32_OPCODE_LD_COND (LD_W, 0, LE, 10)
++ AVR32_OPCODE_LD_COND (LD_W, 0, HI, 11)
++ AVR32_OPCODE_LD_COND (LD_W, 0, VS, 12)
++ AVR32_OPCODE_LD_COND (LD_W, 0, VC, 13)
++ AVR32_OPCODE_LD_COND (LD_W, 0, QS, 14)
++ AVR32_OPCODE_LD_COND (LD_W, 0, AL, 15)
++
++ AVR32_OPCODE_LD_COND (LD_SH, 1, EQ, 0)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, NE, 1)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, CC, 2)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, CS, 3)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, GE, 4)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, LT, 5)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, MI, 6)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, PL, 7)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, LS, 8)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, GT, 9)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, LE, 10)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, HI, 11)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, VS, 12)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, VC, 13)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, QS, 14)
++ AVR32_OPCODE_LD_COND (LD_SH, 1, AL, 15)
++
++ AVR32_OPCODE_LD_COND (LD_UH, 2, EQ, 0)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, NE, 1)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, CC, 2)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, CS, 3)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, GE, 4)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, LT, 5)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, MI, 6)
++ AVR32_OPCODE_LD_COND (LD_UH, 2, PL, 7)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, LS, 8)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, GT, 9)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, LE, 10)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, HI, 11)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, VS, 12)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, VC, 13)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, QS, 14)
++ AVR32_OPCODE_LD_COND (LD_SH, 2, AL, 15)
++
++ AVR32_OPCODE_LD_COND (LD_SB, 3, EQ, 0)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, NE, 1)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, CC, 2)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, CS, 3)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, GE, 4)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, LT, 5)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, MI, 6)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, PL, 7)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, LS, 8)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, GT, 9)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, LE, 10)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, HI, 11)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, VS, 12)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, VC, 13)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, QS, 14)
++ AVR32_OPCODE_LD_COND (LD_SB, 3, AL, 15)
++
++ AVR32_OPCODE_LD_COND (LD_UB, 4, EQ, 0)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, NE, 1)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, CC, 2)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, CS, 3)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, GE, 4)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, LT, 5)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, MI, 6)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, PL, 7)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, LS, 8)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, GT, 9)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, LE, 10)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, HI, 11)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, VS, 12)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, VC, 13)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, QS, 14)
++ AVR32_OPCODE_LD_COND (LD_UB, 4, AL, 15)
++
++ AVR32_OPCODE_ST_COND (ST_W, 5, EQ, 0)
++ AVR32_OPCODE_ST_COND (ST_W, 5, NE, 1)
++ AVR32_OPCODE_ST_COND (ST_W, 5, CC, 2)
++ AVR32_OPCODE_ST_COND (ST_W, 5, CS, 3)
++ AVR32_OPCODE_ST_COND (ST_W, 5, GE, 4)
++ AVR32_OPCODE_ST_COND (ST_W, 5, LT, 5)
++ AVR32_OPCODE_ST_COND (ST_W, 5, MI, 6)
++ AVR32_OPCODE_ST_COND (ST_W, 5, PL, 7)
++ AVR32_OPCODE_ST_COND (ST_W, 5, LS, 8)
++ AVR32_OPCODE_ST_COND (ST_W, 5, GT, 9)
++ AVR32_OPCODE_ST_COND (ST_W, 5, LE, 10)
++ AVR32_OPCODE_ST_COND (ST_W, 5, HI, 11)
++ AVR32_OPCODE_ST_COND (ST_W, 5, VS, 12)
++ AVR32_OPCODE_ST_COND (ST_W, 5, VC, 13)
++ AVR32_OPCODE_ST_COND (ST_W, 5, QS, 14)
++ AVR32_OPCODE_ST_COND (ST_W, 5, AL, 15)
++
++ AVR32_OPCODE_ST_COND (ST_H, 6, EQ, 0)
++ AVR32_OPCODE_ST_COND (ST_H, 6, NE, 1)
++ AVR32_OPCODE_ST_COND (ST_H, 6, CC, 2)
++ AVR32_OPCODE_ST_COND (ST_H, 6, CS, 3)
++ AVR32_OPCODE_ST_COND (ST_H, 6, GE, 4)
++ AVR32_OPCODE_ST_COND (ST_H, 6, LT, 5)
++ AVR32_OPCODE_ST_COND (ST_H, 6, MI, 6)
++ AVR32_OPCODE_ST_COND (ST_H, 6, PL, 7)
++ AVR32_OPCODE_ST_COND (ST_H, 6, LS, 8)
++ AVR32_OPCODE_ST_COND (ST_H, 6, GT, 9)
++ AVR32_OPCODE_ST_COND (ST_H, 6, LE, 10)
++ AVR32_OPCODE_ST_COND (ST_H, 6, HI, 11)
++ AVR32_OPCODE_ST_COND (ST_H, 6, VS, 12)
++ AVR32_OPCODE_ST_COND (ST_H, 6, VC, 13)
++ AVR32_OPCODE_ST_COND (ST_H, 6, QS, 14)
++ AVR32_OPCODE_ST_COND (ST_H, 6, AL, 15)
++
++ AVR32_OPCODE_ST_COND (ST_B, 7, EQ, 0)
++ AVR32_OPCODE_ST_COND (ST_B, 7, NE, 1)
++ AVR32_OPCODE_ST_COND (ST_B, 7, CC, 2)
++ AVR32_OPCODE_ST_COND (ST_B, 7, CS, 3)
++ AVR32_OPCODE_ST_COND (ST_B, 7, GE, 4)
++ AVR32_OPCODE_ST_COND (ST_B, 7, LT, 5)
++ AVR32_OPCODE_ST_COND (ST_B, 7, MI, 6)
++ AVR32_OPCODE_ST_COND (ST_B, 7, PL, 7)
++ AVR32_OPCODE_ST_COND (ST_B, 7, LS, 8)
++ AVR32_OPCODE_ST_COND (ST_B, 7, GT, 9)
++ AVR32_OPCODE_ST_COND (ST_B, 7, LE, 10)
++ AVR32_OPCODE_ST_COND (ST_B, 7, HI, 11)
++ AVR32_OPCODE_ST_COND (ST_B, 7, VS, 12)
++ AVR32_OPCODE_ST_COND (ST_B, 7, VC, 13)
++ AVR32_OPCODE_ST_COND (ST_B, 7, QS, 14)
++ AVR32_OPCODE_ST_COND (ST_B, 7, AL, 15)
++
++ {
++ AVR32_OPC_MOVH, 4, 0xfc100000, 0xfff00000,
++ &avr32_syntax_table[AVR32_SYNTAX_MOVH],
++ BFD_RELOC_AVR32_16U, 2, 1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_RY],
++ &avr32_ifield_table[AVR32_IFIELD_K16],
++ },
++ },
++ {
++ AVR32_OPC_SSCALL, 2, 0xd7530000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_SSCALL],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++ {
++ AVR32_OPC_RETSS, 2, 0xd7630000, 0xffff0000,
++ &avr32_syntax_table[AVR32_SYNTAX_RETSS],
++ BFD_RELOC_UNUSED, 0, -1, { NULL },
++ },
++
++ {
++ AVR32_OPC_FMAC_S, 4, 0xE1A00000, 0xFFF0F000,
++ &avr32_syntax_table[AVR32_SYNTAX_FMAC_S],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RA],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FNMAC_S, 4, 0xE1A01000, 0xFFF0F000,
++ &avr32_syntax_table[AVR32_SYNTAX_FNMAC_S],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RA],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FMSC_S, 4, 0xE3A00000, 0xFFF0F000,
++ &avr32_syntax_table[AVR32_SYNTAX_FMSC_S],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RA],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FNMSC_S, 4, 0xE3A01000, 0xFFF0F000,
++ &avr32_syntax_table[AVR32_SYNTAX_FNMSC_S],
++ BFD_RELOC_UNUSED, 4, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RA],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FMUL_S, 4, 0xE5A20000, 0xFFFFF000,
++ &avr32_syntax_table[AVR32_SYNTAX_FMUL_S],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FNMUL_S, 4, 0xE5A30000, 0xFFFFF000,
++ &avr32_syntax_table[AVR32_SYNTAX_FNMUL_S],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FADD_S, 4, 0xE5A00000, 0xFFFFF000,
++ &avr32_syntax_table[AVR32_SYNTAX_FADD_S],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FSUB_S, 4, 0xE5A10000, 0xFFFFF000,
++ &avr32_syntax_table[AVR32_SYNTAX_FSUB_S],
++ BFD_RELOC_UNUSED, 3, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FCASTRS_SW, 4, 0xE5AB0000, 0xFFFFF0F0,
++ &avr32_syntax_table[AVR32_SYNTAX_FCASTRS_SW],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FCASTRS_UW, 4, 0xE5A90000, 0xFFFFF0F0,
++ &avr32_syntax_table[AVR32_SYNTAX_FCASTRS_UW],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FCASTSW_S, 4, 0xE5A60000, 0xFFFFF0F0,
++ &avr32_syntax_table[AVR32_SYNTAX_FCASTSW_S],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FCASTUW_S, 4, 0xE5A40000, 0xFFFFF0F0,
++ &avr32_syntax_table[AVR32_SYNTAX_FCASTUW_S],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FCMP_S, 4, 0xE5AC0000, 0xFFFFFF00,
++ &avr32_syntax_table[AVR32_SYNTAX_FCMP_S],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RX],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FCHK_S, 4, 0xE5AD0000, 0xFFFFFFF0,
++ &avr32_syntax_table[AVR32_SYNTAX_FCHK_S],
++ BFD_RELOC_UNUSED, 1, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FRCPA_S, 4, 0xE5AE0000, 0xFFFFF0F0,
++ &avr32_syntax_table[AVR32_SYNTAX_FRCPA_S],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ },
++ {
++ AVR32_OPC_FRSQRTA_S, 4, 0xE5AF0000, 0xFFFFF0F0,
++ &avr32_syntax_table[AVR32_SYNTAX_FRSQRTA_S],
++ BFD_RELOC_UNUSED, 2, -1,
++ {
++ &avr32_ifield_table[AVR32_IFIELD_FP_RD],
++ &avr32_ifield_table[AVR32_IFIELD_FP_RY]
++ }
++ }
++
++};
++
++
++const struct avr32_alias avr32_alias_table[] =
++ {
++ {
++ AVR32_ALIAS_PICOSVMAC0,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x0c },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMAC1,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x0d },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMAC2,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x0e },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMAC3,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x0f },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMUL0,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x08 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMUL1,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x09 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMUL2,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x0a },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSVMUL3,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x0b },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMAC0,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x04 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMAC1,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x05 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMAC2,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x06 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMAC3,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x07 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMUL0,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x00 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMUL1,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x01 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMUL2,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x02 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOVMUL3,
++ &avr32_opc_table[AVR32_OPC_COP],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ { 0, 0x03 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLD_D1,
++ &avr32_opc_table[AVR32_OPC_LDC_D1],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLD_D2,
++ &avr32_opc_table[AVR32_OPC_LDC_D2],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLD_D3,
++ &avr32_opc_table[AVR32_OPC_LDC_D3],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLD_W1,
++ &avr32_opc_table[AVR32_OPC_LDC_W1],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLD_W2,
++ &avr32_opc_table[AVR32_OPC_LDC_W2],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLD_W3,
++ &avr32_opc_table[AVR32_OPC_LDC_W3],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLDM_D,
++ &avr32_opc_table[AVR32_OPC_LDCM_D],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLDM_D_PU,
++ &avr32_opc_table[AVR32_OPC_LDCM_D_PU],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLDM_W,
++ &avr32_opc_table[AVR32_OPC_LDCM_W],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOLDM_W_PU,
++ &avr32_opc_table[AVR32_OPC_LDCM_W_PU],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOMV_D1,
++ &avr32_opc_table[AVR32_OPC_MVCR_D],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOMV_D2,
++ &avr32_opc_table[AVR32_OPC_MVRC_D],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOMV_W1,
++ &avr32_opc_table[AVR32_OPC_MVCR_W],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOMV_W2,
++ &avr32_opc_table[AVR32_OPC_MVRC_W],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOST_D1,
++ &avr32_opc_table[AVR32_OPC_STC_D1],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOST_D2,
++ &avr32_opc_table[AVR32_OPC_STC_D2],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOST_D3,
++ &avr32_opc_table[AVR32_OPC_STC_D3],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOST_W1,
++ &avr32_opc_table[AVR32_OPC_STC_W1],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOST_W2,
++ &avr32_opc_table[AVR32_OPC_STC_W2],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOST_W3,
++ &avr32_opc_table[AVR32_OPC_STC_W3],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSTM_D,
++ &avr32_opc_table[AVR32_OPC_STCM_D],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSTM_D_PU,
++ &avr32_opc_table[AVR32_OPC_STCM_D_PU],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSTM_W,
++ &avr32_opc_table[AVR32_OPC_STCM_W],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ },
++ },
++ {
++ AVR32_ALIAS_PICOSTM_W_PU,
++ &avr32_opc_table[AVR32_OPC_STCM_W_PU],
++ {
++ { 0, PICO_CPNO },
++ { 1, 0 }, { 1, 1 }, { 1, 2 },
++ },
++ },
++ };
++
++
++#define SYNTAX_NORMAL0(id, mne, opc, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, 0, { } \
++ }
++#define SYNTAX_NORMAL1(id, mne, opc, op0, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, 1, \
++ { \
++ AVR32_OPERAND_##op0, \
++ } \
++ }
++#define SYNTAX_NORMALM1(id, mne, opc, op0, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, -1, \
++ { \
++ AVR32_OPERAND_##op0, \
++ } \
++ }
++#define SYNTAX_NORMAL2(id, mne, opc, op0, op1, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, 2, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ } \
++ }
++#define SYNTAX_NORMALM2(id, mne, opc, op0, op1, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, -2, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ } \
++ }
++#define SYNTAX_NORMAL3(id, mne, opc, op0, op1, op2, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, 3, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ AVR32_OPERAND_##op2, \
++ } \
++ }
++#define SYNTAX_NORMALM3(id, mne, opc, op0, op1, op2, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, -3, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ AVR32_OPERAND_##op2, \
++ } \
++ }
++#define SYNTAX_NORMAL4(id, mne, opc, op0, op1, op2, op3, arch)\
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, 4, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ AVR32_OPERAND_##op2, AVR32_OPERAND_##op3, \
++ } \
++ }
++#define SYNTAX_NORMAL5(id, mne, opc, op0, op1, op2, op3, op4, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ NULL, 5, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ AVR32_OPERAND_##op2, AVR32_OPERAND_##op3, \
++ AVR32_OPERAND_##op4, \
++ } \
++ }
++
++#define SYNTAX_NORMAL_C1(id, mne, opc, nxt, op0, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ &avr32_syntax_table[AVR32_SYNTAX_##nxt], 1, \
++ { \
++ AVR32_OPERAND_##op0, \
++ } \
++ }
++#define SYNTAX_NORMAL_CM1(id, mne, opc, nxt, op0, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ &avr32_syntax_table[AVR32_SYNTAX_##nxt], -1, \
++ { \
++ AVR32_OPERAND_##op0, \
++ } \
++ }
++#define SYNTAX_NORMAL_C2(id, mne, opc, nxt, op0, op1, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ &avr32_syntax_table[AVR32_SYNTAX_##nxt], 2, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ } \
++ }
++#define SYNTAX_NORMAL_CM2(id, mne, opc, nxt, op0, op1, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ &avr32_syntax_table[AVR32_SYNTAX_##nxt], -2, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ } \
++ }
++#define SYNTAX_NORMAL_C3(id, mne, opc, nxt, op0, op1, op2, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ &avr32_syntax_table[AVR32_SYNTAX_##nxt], 3, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ AVR32_OPERAND_##op2, \
++ } \
++ }
++#define SYNTAX_NORMAL_CM3(id, mne, opc, nxt, op0, op1, op2, arch) \
++ { \
++ AVR32_SYNTAX_##id, arch, \
++ &avr32_mnemonic_table[AVR32_MNEMONIC_##mne], \
++ AVR32_PARSER_NORMAL, \
++ { &avr32_opc_table[AVR32_OPC_##opc], }, \
++ &avr32_syntax_table[AVR32_SYNTAX_##nxt], -3, \
++ { \
++ AVR32_OPERAND_##op0, AVR32_OPERAND_##op1, \
++ AVR32_OPERAND_##op2, \
++ } \
++ }
++
++
++const struct avr32_syntax avr32_syntax_table[] =
++ {
++ SYNTAX_NORMAL1(ABS, ABS, ABS, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(ACALL, ACALL, ACALL, UNSIGNED_CONST_W, AVR32_V1),
++ SYNTAX_NORMAL1(ACR, ACR, ACR, INTREG,AVR32_V1),
++ SYNTAX_NORMAL3(ADC, ADC, ADC, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ADD1, ADD, ADD1, ADD2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(ADD2, ADD, ADD2, INTREG, INTREG, INTREG_LSL, AVR32_V1),
++ SYNTAX_NORMAL3(ADDABS, ADDABS, ADDABS, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(ADDHH_W, ADDHH_W, ADDHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL_C2(AND1, AND, AND1, AND2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(AND2, AND, AND2, AND3, INTREG, INTREG, INTREG_LSL, AVR32_V1),
++ SYNTAX_NORMAL3(AND3, AND, AND3, INTREG, INTREG, INTREG_LSR, AVR32_V1),
++ SYNTAX_NORMAL_C2(ANDH, ANDH, ANDH, ANDH_COH, INTREG, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL3(ANDH_COH, ANDH, ANDH_COH, INTREG, UNSIGNED_CONST, COH, AVR32_V1),
++ SYNTAX_NORMAL_C2(ANDL, ANDL, ANDL, ANDL_COH, INTREG, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL3(ANDL_COH, ANDL, ANDL_COH, INTREG, UNSIGNED_CONST, COH, AVR32_V1),
++ SYNTAX_NORMAL2(ANDN, ANDN, ANDN, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(ASR1, ASR, ASR1, ASR3, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(ASR3, ASR, ASR3, ASR2, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(ASR2, ASR, ASR2, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL4(BFEXTS, BFEXTS, BFEXTS, INTREG, INTREG, UNSIGNED_NUMBER, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL4(BFEXTU, BFEXTU, BFEXTU, INTREG, INTREG, UNSIGNED_NUMBER, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL4(BFINS, BFINS, BFINS, INTREG, INTREG, UNSIGNED_NUMBER, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(BLD, BLD, BLD, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL_C1(BREQ1, BREQ, BREQ1, BREQ2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRNE1, BRNE, BRNE1, BRNE2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRCC1, BRCC, BRCC1, BRCC2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRCS1, BRCS, BRCS1, BRCS2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRGE1, BRGE, BRGE1, BRGE2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRLT1, BRLT, BRLT1, BRLT2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRMI1, BRMI, BRMI1, BRMI2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRPL1, BRPL, BRPL1, BRPL2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRHS1, BRHS, BRCC1, BRHS2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL_C1(BRLO1, BRLO, BRCS1, BRLO2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BREQ2, BREQ, BREQ2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRNE2, BRNE, BRNE2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRCC2, BRCC, BRCC2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRCS2, BRCS, BRCS2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRGE2, BRGE, BRGE2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRLT2, BRLT, BRLT2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRMI2, BRMI, BRMI2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRPL2, BRPL, BRPL2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRLS, BRLS, BRLS, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRGT, BRGT, BRGT, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRLE, BRLE, BRLE, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRHI, BRHI, BRHI, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRVS, BRVS, BRVS, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRVC, BRVC, BRVC, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRQS, BRQS, BRQS, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRAL, BRAL, BRAL, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRHS2, BRHS, BRCC2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(BRLO2, BRLO, BRCS2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL0(BREAKPOINT, BREAKPOINT, BREAKPOINT, AVR32_V1),
++ SYNTAX_NORMAL1(BREV, BREV, BREV, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(BST, BST, BST, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(CACHE, CACHE, CACHE, INTREG_SDISP, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL1(CASTS_B, CASTS_B, CASTS_B, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(CASTS_H, CASTS_H, CASTS_H, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(CASTU_B, CASTU_B, CASTU_B, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(CASTU_H, CASTU_H, CASTU_H, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(CBR, CBR, CBR, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(CLZ, CLZ, CLZ, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(COM, COM, COM, INTREG, AVR32_V1),
++ SYNTAX_NORMAL5(COP, COP, COP, CPNO, CPREG, CPREG, CPREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(CP_B, CP_B, CP_B, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(CP_H, CP_H, CP_H, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(CP_W1, CP_W, CP_W1, CP_W2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(CP_W2, CP_W, CP_W2, CP_W3, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(CP_W3, CP_W, CP_W3, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(CPC1, CPC, CPC1, CPC2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(CPC2, CPC, CPC2, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(CSRF, CSRF, CSRF, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL1(CSRFCZ, CSRFCZ, CSRFCZ, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL3(DIVS, DIVS, DIVS, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(DIVU, DIVU, DIVU, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(EOR1, EOR, EOR1, EOR2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(EOR2, EOR, EOR2, EOR3, INTREG, INTREG, INTREG_LSL, AVR32_V1),
++ SYNTAX_NORMAL3(EOR3, EOR, EOR3, INTREG, INTREG, INTREG_LSR, AVR32_V1),
++ SYNTAX_NORMAL2(EORL, EORL, EORL, INTREG, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(EORH, EORH, EORH, INTREG, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL0(FRS, FRS, FRS, AVR32_V1),
++ SYNTAX_NORMAL0(SSCALL, SSCALL, SSCALL, AVR32_V3),
++ SYNTAX_NORMAL0(RETSS, RETSS, RETSS, AVR32_V3),
++ SYNTAX_NORMAL1(ICALL, ICALL, ICALL, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(INCJOSP, INCJOSP, INCJOSP, JOSPINC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_D1, LD_D, LD_D1, LD_D2, DWREG, INTREG_POSTINC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_D2, LD_D, LD_D2, LD_D3, DWREG, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_D3, LD_D, LD_D3, LD_D5, DWREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_D5, LD_D, LD_D5, LD_D4, DWREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL2(LD_D4, LD_D, LD_D4, DWREG, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_SB2, LD_SB, LD_SB2, LD_SB1, INTREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL2(LD_SB1, LD_SB, LD_SB1, INTREG, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UB1, LD_UB, LD_UB1, LD_UB2, INTREG, INTREG_POSTINC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UB2, LD_UB, LD_UB2, LD_UB5, INTREG, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UB5, LD_UB, LD_UB5, LD_UB3, INTREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UB3, LD_UB, LD_UB3, LD_UB4, INTREG, INTREG_UDISP, AVR32_V1),
++ SYNTAX_NORMAL2(LD_UB4, LD_UB, LD_UB4, INTREG, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_SH1, LD_SH, LD_SH1, LD_SH2, INTREG, INTREG_POSTINC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_SH2, LD_SH, LD_SH2, LD_SH5, INTREG, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_SH5, LD_SH, LD_SH5, LD_SH3, INTREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_SH3, LD_SH, LD_SH3, LD_SH4, INTREG, INTREG_UDISP_H, AVR32_V1),
++ SYNTAX_NORMAL2(LD_SH4, LD_SH, LD_SH4, INTREG, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UH1, LD_UH, LD_UH1, LD_UH2, INTREG, INTREG_POSTINC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UH2, LD_UH, LD_UH2, LD_UH5, INTREG, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UH5, LD_UH, LD_UH5, LD_UH3, INTREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_UH3, LD_UH, LD_UH3, LD_UH4, INTREG, INTREG_UDISP_H, AVR32_V1),
++ SYNTAX_NORMAL2(LD_UH4, LD_UH, LD_UH4, INTREG, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_W1, LD_W, LD_W1, LD_W2, INTREG, INTREG_POSTINC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_W2, LD_W, LD_W2, LD_W5, INTREG, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_W5, LD_W, LD_W5, LD_W6, INTREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_W6, LD_W, LD_W6, LD_W3, INTREG, INTREG_XINDEX, AVR32_V1),
++ SYNTAX_NORMAL_C2(LD_W3, LD_W, LD_W3, LD_W4, INTREG, INTREG_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL2(LD_W4, LD_W, LD_W4, INTREG, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL3(LDC_D1, LDC_D, LDC_D1, CPNO, CPREG_D, INTREG_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL_C3(LDC_D2, LDC_D, LDC_D2, LDC_D1, CPNO, CPREG_D, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C3(LDC_D3, LDC_D, LDC_D3, LDC_D2, CPNO, CPREG_D, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL3(LDC_W1, LDC_W, LDC_W1, CPNO, CPREG, INTREG_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL_C3(LDC_W2, LDC_W, LDC_W2, LDC_W1, CPNO, CPREG, INTREG_PREDEC, AVR32_V1),
++ SYNTAX_NORMAL_C3(LDC_W3, LDC_W, LDC_W3, LDC_W2, CPNO, CPREG, INTREG_INDEX, AVR32_V1),
++ SYNTAX_NORMAL2(LDC0_D, LDC0_D, LDC0_D, CPREG_D, INTREG_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL2(LDC0_W, LDC0_W, LDC0_W, CPREG, INTREG_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL_CM3(LDCM_D, LDCM_D, LDCM_D, LDCM_D_PU, CPNO, INTREG, REGLIST_CPD8, AVR32_V1),
++ SYNTAX_NORMALM3(LDCM_D_PU, LDCM_D, LDCM_D_PU, CPNO, INTREG_POSTINC, REGLIST_CPD8, AVR32_V1),
++ SYNTAX_NORMAL_CM3(LDCM_W, LDCM_W, LDCM_W, LDCM_W_PU, CPNO, INTREG, REGLIST_CP8, AVR32_V1),
++ SYNTAX_NORMALM3(LDCM_W_PU, LDCM_W, LDCM_W_PU, CPNO, INTREG_POSTINC, REGLIST_CP8, AVR32_V1),
++ SYNTAX_NORMAL2(LDDPC, LDDPC, LDDPC, INTREG, PC_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL2(LDDPC_EXT, LDDPC, LDDPC_EXT, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(LDDSP, LDDSP, LDDSP, INTREG, SP_UDISP_W, AVR32_V1),
++ SYNTAX_NORMAL2(LDINS_B, LDINS_B, LDINS_B, INTREG_BSEL, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL2(LDINS_H, LDINS_H, LDINS_H, INTREG_HSEL, INTREG_SDISP_H, AVR32_V1),
++ SYNTAX_NORMALM1(LDM, LDM, LDM, REGLIST_LDM, AVR32_V1),
++ SYNTAX_NORMAL_CM2(LDMTS, LDMTS, LDMTS, LDMTS_PU, INTREG, REGLIST16, AVR32_V1),
++ SYNTAX_NORMALM2(LDMTS_PU, LDMTS, LDMTS_PU, INTREG_POSTINC, REGLIST16, AVR32_V1),
++ SYNTAX_NORMAL2(LDSWP_SH, LDSWP_SH, LDSWP_SH, INTREG, INTREG_SDISP_H, AVR32_V1),
++ SYNTAX_NORMAL2(LDSWP_UH, LDSWP_UH, LDSWP_UH, INTREG, INTREG_SDISP_H, AVR32_V1),
++ SYNTAX_NORMAL2(LDSWP_W, LDSWP_W, LDSWP_W, INTREG, INTREG_SDISP_W, AVR32_V1),
++ SYNTAX_NORMAL_C3(LSL1, LSL, LSL1, LSL3, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(LSL3, LSL, LSL3, LSL2, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(LSL2, LSL, LSL2, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL_C3(LSR1, LSR, LSR1, LSR3, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(LSR3, LSR, LSR3, LSR2, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL2(LSR2, LSR, LSR2, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL3(MAC, MAC, MAC, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MACHH_D, MACHH_D, MACHH_D, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MACHH_W, MACHH_W, MACHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MACS_D, MACS_D, MACS_D, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MACSATHH_W, MACSATHH_W, MACSATHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MACUD, MACU_D, MACUD, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MACWH_D, MACWH_D, MACWH_D, INTREG, INTREG, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MAX, MAX, MAX, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(MCALL, MCALL, MCALL, MCALL, AVR32_V1),
++ SYNTAX_NORMAL2(MFDR, MFDR, MFDR, INTREG, UNSIGNED_CONST_W, AVR32_V1),
++ SYNTAX_NORMAL2(MFSR, MFSR, MFSR, INTREG, UNSIGNED_CONST_W, AVR32_V1),
++ SYNTAX_NORMAL3(MIN, MIN, MIN, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOV3, MOV, MOV3, MOV1, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOV1, MOV, MOV1, MOV2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOV2, MOV, MOV2,INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVEQ1, MOVEQ, MOVEQ1, MOVEQ2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVNE1, MOVNE, MOVNE1, MOVNE2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVCC1, MOVCC, MOVCC1, MOVCC2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVCS1, MOVCS, MOVCS1, MOVCS2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVGE1, MOVGE, MOVGE1, MOVGE2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVLT1, MOVLT, MOVLT1, MOVLT2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVMI1, MOVMI, MOVMI1, MOVMI2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVPL1, MOVPL, MOVPL1, MOVPL2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVLS1, MOVLS, MOVLS1, MOVLS2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVGT1, MOVGT, MOVGT1, MOVGT2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVLE1, MOVLE, MOVLE1, MOVLE2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVHI1, MOVHI, MOVHI1, MOVHI2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVVS1, MOVVS, MOVVS1, MOVVS2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVVC1, MOVVC, MOVVC1, MOVVC2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVQS1, MOVQS, MOVQS1, MOVQS2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVAL1, MOVAL, MOVAL1, MOVAL2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVHS1, MOVHS, MOVCC1, MOVHS2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MOVLO1, MOVLO, MOVCS1, MOVLO2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(MOVEQ2, MOVEQ, MOVEQ2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVNE2, MOVNE, MOVNE2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVCC2, MOVCC, MOVCC2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVCS2, MOVCS, MOVCS2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVGE2, MOVGE, MOVGE2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVLT2, MOVLT, MOVLT2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVMI2, MOVMI, MOVMI2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVPL2, MOVPL, MOVPL2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVLS2, MOVLS, MOVLS2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVGT2, MOVGT, MOVGT2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVLE2, MOVLE, MOVLE2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVHI2, MOVHI, MOVHI2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVVS2, MOVVS, MOVVS2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVVC2, MOVVC, MOVVC2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVQS2, MOVQS, MOVQS2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVAL2, MOVAL, MOVAL2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVHS2, MOVHS, MOVCC2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MOVLO2, MOVLO, MOVCS2, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(MTDR, MTDR, MTDR, UNSIGNED_CONST_W, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(MTSR, MTSR, MTSR, UNSIGNED_CONST_W, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(MUL1, MUL, MUL1, MUL2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(MUL2, MUL, MUL2, MUL3, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MUL3, MUL, MUL3, INTREG, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL3(MULHH_W, MULHH_W, MULHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULNHH_W, MULNHH_W, MULNHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULNWH_D, MULNWH_D, MULNWH_D, INTREG, INTREG, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULSD, MULS_D, MULSD, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MULSATHH_H, MULSATHH_H, MULSATHH_H, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULSATHH_W, MULSATHH_W, MULSATHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULSATRNDHH_H, MULSATRNDHH_H, MULSATRNDHH_H, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULSATRNDWH_W, MULSATRNDWH_W, MULSATRNDWH_W, INTREG, INTREG, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULSATWH_W, MULSATWH_W, MULSATWH_W, INTREG, INTREG, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL3(MULU_D, MULU_D, MULU_D, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MULWH_D, MULWH_D, MULWH_D, INTREG, INTREG, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL1(MUSFR, MUSFR, MUSFR, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(MUSTR, MUSTR, MUSTR, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(MVCR_D, MVCR_D, MVCR_D, CPNO, DWREG, CPREG_D, AVR32_V1),
++ SYNTAX_NORMAL3(MVCR_W, MVCR_W, MVCR_W, CPNO, INTREG, CPREG, AVR32_V1),
++ SYNTAX_NORMAL3(MVRC_D, MVRC_D, MVRC_D, CPNO, CPREG_D, DWREG, AVR32_V1),
++ SYNTAX_NORMAL3(MVRC_W, MVRC_W, MVRC_W, CPNO, CPREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(NEG, NEG, NEG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL0(NOP, NOP, NOP, AVR32_V1),
++ SYNTAX_NORMAL_C2(OR1, OR, OR1, OR2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(OR2, OR, OR2, OR3, INTREG, INTREG, INTREG_LSL, AVR32_V1),
++ SYNTAX_NORMAL3(OR3, OR, OR3, INTREG, INTREG, INTREG_LSR, AVR32_V1),
++ SYNTAX_NORMAL2(ORH, ORH, ORH, INTREG, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(ORL, ORL, ORL, INTREG, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(PABS_SB, PABS_SB, PABS_SB, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL2(PABS_SH, PABS_SH, PABS_SH, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PACKSH_SB, PACKSH_SB, PACKSH_SB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PACKSH_UB, PACKSH_UB, PACKSH_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PACKW_SH, PACKW_SH, PACKW_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADD_B, PADD_B, PADD_B, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADD_H, PADD_H, PADD_H, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDH_SH, PADDH_SH, PADDH_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDH_UB, PADDH_UB, PADDH_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDS_SB, PADDS_SB, PADDS_SB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDS_SH, PADDS_SH, PADDS_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDS_UB, PADDS_UB, PADDS_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDS_UH, PADDS_UH, PADDS_UH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDSUB_H, PADDSUB_H, PADDSUB_H, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDSUBH_SH, PADDSUBH_SH, PADDSUBH_SH, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDSUBS_SH, PADDSUBS_SH, PADDSUBS_SH, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDSUBS_UH, PADDSUBS_UH, PADDSUBS_UH, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDX_H, PADDX_H, PADDX_H, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDXH_SH, PADDXH_SH, PADDXH_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDXS_SH, PADDXS_SH, PADDXS_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PADDXS_UH, PADDXS_UH, PADDXS_UH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PASR_B, PASR_B, PASR_B, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_SIMD),
++ SYNTAX_NORMAL3(PASR_H, PASR_H, PASR_H, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_SIMD),
++ SYNTAX_NORMAL3(PAVG_SH, PAVG_SH, PAVG_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PAVG_UB, PAVG_UB, PAVG_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PLSL_B, PLSL_B, PLSL_B, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_SIMD),
++ SYNTAX_NORMAL3(PLSL_H, PLSL_H, PLSL_H, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_SIMD),
++ SYNTAX_NORMAL3(PLSR_B, PLSR_B, PLSR_B, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_SIMD),
++ SYNTAX_NORMAL3(PLSR_H, PLSR_H, PLSR_H, INTREG, INTREG, UNSIGNED_NUMBER, AVR32_SIMD),
++ SYNTAX_NORMAL3(PMAX_SH, PMAX_SH, PMAX_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PMAX_UB, PMAX_UB, PMAX_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PMIN_SH, PMIN_SH, PMIN_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PMIN_UB, PMIN_UB, PMIN_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL0(POPJC, POPJC, POPJC, AVR32_V1),
++ SYNTAX_NORMAL_CM1(POPM, POPM, POPM, POPM_E, REGLIST9, AVR32_V1),
++ SYNTAX_NORMALM1(POPM_E, POPM, POPM_E, REGLIST16, AVR32_V1),
++ SYNTAX_NORMAL1(PREF, PREF, PREF, INTREG_SDISP, AVR32_V1),
++ SYNTAX_NORMAL3(PSAD, PSAD, PSAD, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUB_B, PSUB_B, PSUB_B, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUB_H, PSUB_H, PSUB_H, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBADD_H, PSUBADD_H, PSUBADD_H, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBADDH_SH, PSUBADDH_SH, PSUBADDH_SH, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBADDS_SH, PSUBADDS_SH, PSUBADDS_SH, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBADDS_UH, PSUBADDS_UH, PSUBADDS_UH, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBH_SH, PSUBH_SH, PSUBH_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBH_UB, PSUBH_UB, PSUBH_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBS_SB, PSUBS_SB, PSUBS_SB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBS_SH, PSUBS_SH, PSUBS_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBS_UB, PSUBS_UB, PSUBS_UB, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBS_UH, PSUBS_UH, PSUBS_UH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBX_H, PSUBX_H, PSUBX_H, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBXH_SH, PSUBXH_SH, PSUBXH_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBXS_SH, PSUBXS_SH, PSUBXS_SH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL3(PSUBXS_UH, PSUBXS_UH, PSUBXS_UH, INTREG, INTREG, INTREG, AVR32_SIMD),
++ SYNTAX_NORMAL2(PUNPCKSB_H, PUNPCKSB_H, PUNPCKSB_H, INTREG, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL2(PUNPCKUB_H, PUNPCKUB_H, PUNPCKUB_H, INTREG, INTREG_HSEL, AVR32_SIMD),
++ SYNTAX_NORMAL0(PUSHJC, PUSHJC, PUSHJC, AVR32_V1),
++ SYNTAX_NORMAL_CM1(PUSHM, PUSHM, PUSHM, PUSHM_E, REGLIST8, AVR32_V1),
++ SYNTAX_NORMALM1(PUSHM_E, PUSHM, PUSHM_E, REGLIST16, AVR32_V1),
++ SYNTAX_NORMAL_C1(RCALL1, RCALL, RCALL1, RCALL2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(RCALL2, RCALL, RCALL2, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(RETEQ, RETEQ, RETEQ, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETNE, RETNE, RETNE, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETCC, RETCC, RETCC, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETCS, RETCS, RETCS, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETGE, RETGE, RETGE, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETLT, RETLT, RETLT, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETMI, RETMI, RETMI, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETPL, RETPL, RETPL, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETLS, RETLS, RETLS, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETGT, RETGT, RETGT, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETLE, RETLE, RETLE, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETHI, RETHI, RETHI, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETVS, RETVS, RETVS, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETVC, RETVC, RETVC, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETQS, RETQS, RETQS, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETAL, RETAL, RETAL, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETHS, RETHS, RETCC, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL1(RETLO, RETLO, RETCS, RETVAL, AVR32_V1),
++ SYNTAX_NORMAL0(RETD, RETD, RETD, AVR32_V1),
++ SYNTAX_NORMAL0(RETE, RETE, RETE, AVR32_V1),
++ SYNTAX_NORMAL0(RETJ, RETJ, RETJ, AVR32_V1),
++ SYNTAX_NORMAL0(RETS, RETS, RETS, AVR32_V1),
++ SYNTAX_NORMAL1(RJMP, RJMP, RJMP, JMPLABEL, AVR32_V1),
++ SYNTAX_NORMAL1(ROL, ROL, ROL, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(ROR, ROR, ROR, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(RSUB1, RSUB, RSUB1, RSUB2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(RSUB2, RSUB, RSUB2, INTREG, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL3(SATADD_H, SATADD_H, SATADD_H, INTREG, INTREG, INTREG, AVR32_DSP),
++ SYNTAX_NORMAL3(SATADD_W, SATADD_W, SATADD_W, INTREG, INTREG, INTREG, AVR32_DSP),
++ SYNTAX_NORMAL2(SATRNDS, SATRNDS, SATRNDS, INTREG_LSR, UNSIGNED_NUMBER, AVR32_DSP),
++ SYNTAX_NORMAL2(SATRNDU, SATRNDU, SATRNDU, INTREG_LSR, UNSIGNED_NUMBER, AVR32_DSP),
++ SYNTAX_NORMAL2(SATS, SATS, SATS, INTREG_LSR, UNSIGNED_NUMBER, AVR32_DSP),
++ SYNTAX_NORMAL3(SATSUB_H, SATSUB_H, SATSUB_H, INTREG, INTREG, INTREG, AVR32_DSP),
++ SYNTAX_NORMAL_C3(SATSUB_W1, SATSUB_W, SATSUB_W1, SATSUB_W2, INTREG, INTREG, INTREG, AVR32_DSP),
++ SYNTAX_NORMAL3(SATSUB_W2, SATSUB_W, SATSUB_W2, INTREG, INTREG, SIGNED_CONST, AVR32_DSP),
++ SYNTAX_NORMAL2(SATU, SATU, SATU, INTREG_LSR, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL3(SBC, SBC, SBC, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(SBR, SBR, SBR, INTREG, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL0(SCALL, SCALL, SCALL, AVR32_V1),
++ SYNTAX_NORMAL1(SCR, SCR, SCR, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SLEEP, SLEEP, SLEEP, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL1(SREQ, SREQ, SREQ, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRNE, SRNE, SRNE, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRCC, SRCC, SRCC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRCS, SRCS, SRCS, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRGE, SRGE, SRGE, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRLT, SRLT, SRLT, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRMI, SRMI, SRMI, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRPL, SRPL, SRPL, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRLS, SRLS, SRLS, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRGT, SRGT, SRGT, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRLE, SRLE, SRLE, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRHI, SRHI, SRHI, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRVS, SRVS, SRVS, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRVC, SRVC, SRVC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRQS, SRQS, SRQS, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRAL, SRAL, SRAL, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRHS, SRHS, SRCC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SRLO, SRLO, SRCS, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SSRF, SSRF, SSRF, UNSIGNED_NUMBER, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_B1, ST_B, ST_B1, ST_B2, INTREG_POSTINC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_B2, ST_B, ST_B2, ST_B5, INTREG_PREDEC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_B5, ST_B, ST_B5, ST_B3, INTREG_INDEX, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_B3, ST_B, ST_B3, ST_B4, INTREG_UDISP, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(ST_B4, ST_B, ST_B4, INTREG_SDISP, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_D1, ST_D, ST_D1, ST_D2, INTREG_POSTINC, DWREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_D2, ST_D, ST_D2, ST_D3, INTREG_PREDEC, DWREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_D3, ST_D, ST_D3, ST_D5, INTREG, DWREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_D5, ST_D, ST_D5, ST_D4, INTREG_INDEX, DWREG, AVR32_V1),
++ SYNTAX_NORMAL2(ST_D4, ST_D, ST_D4, INTREG_SDISP, DWREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_H1, ST_H, ST_H1, ST_H2, INTREG_POSTINC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_H2, ST_H, ST_H2, ST_H5, INTREG_PREDEC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_H5, ST_H, ST_H5, ST_H3, INTREG_INDEX, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_H3, ST_H, ST_H3, ST_H4, INTREG_UDISP_H, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(ST_H4, ST_H, ST_H4, INTREG_SDISP, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_W1, ST_W, ST_W1, ST_W2, INTREG_POSTINC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_W2, ST_W, ST_W2, ST_W5, INTREG_PREDEC, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_W5, ST_W, ST_W5, ST_W3, INTREG_INDEX, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(ST_W3, ST_W, ST_W3, ST_W4, INTREG_UDISP_W, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(ST_W4, ST_W, ST_W4, INTREG_SDISP, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(STC_D1, STC_D, STC_D1, CPNO, INTREG_UDISP_W, CPREG_D, AVR32_V1),
++ SYNTAX_NORMAL_C3(STC_D2, STC_D, STC_D2, STC_D1, CPNO, INTREG_POSTINC, CPREG_D, AVR32_V1),
++ SYNTAX_NORMAL_C3(STC_D3, STC_D, STC_D3, STC_D2, CPNO, INTREG_INDEX, CPREG_D, AVR32_V1),
++ SYNTAX_NORMAL3(STC_W1, STC_W, STC_W1, CPNO, INTREG_UDISP_W, CPREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(STC_W2, STC_W, STC_W2, STC_W1, CPNO, INTREG_POSTINC, CPREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(STC_W3, STC_W, STC_W3, STC_W2, CPNO, INTREG_INDEX, CPREG, AVR32_V1),
++ SYNTAX_NORMAL2(STC0_D, STC0_D, STC0_D, INTREG_UDISP_W, CPREG_D, AVR32_V1),
++ SYNTAX_NORMAL2(STC0_W, STC0_W, STC0_W, INTREG_UDISP_W, CPREG, AVR32_V1),
++ SYNTAX_NORMAL_CM3(STCM_D, STCM_D, STCM_D, STCM_D_PU, CPNO, INTREG, REGLIST_CPD8, AVR32_V1),
++ SYNTAX_NORMALM3(STCM_D_PU, STCM_D, STCM_D_PU, CPNO, INTREG_PREDEC, REGLIST_CPD8, AVR32_V1),
++ SYNTAX_NORMAL_CM3(STCM_W, STCM_W, STCM_W, STCM_W_PU, CPNO, INTREG, REGLIST_CP8, AVR32_V1),
++ SYNTAX_NORMALM3(STCM_W_PU, STCM_W, STCM_W_PU, CPNO, INTREG_PREDEC, REGLIST_CP8, AVR32_V1),
++ SYNTAX_NORMAL2(STCOND, STCOND, STCOND, INTREG_SDISP, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(STDSP, STDSP, STDSP, SP_UDISP_W, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(STHH_W2, STHH_W, STHH_W2, STHH_W1, INTREG_INDEX, INTREG_HSEL, INTREG_HSEL, AVR32_V1),
++ SYNTAX_NORMAL3(STHH_W1, STHH_W, STHH_W1, INTREG_UDISP_W, INTREG_HSEL, INTREG_HSEL, AVR32_V1),
++ SYNTAX_NORMAL_CM2(STM, STM, STM, STM_PU, INTREG, REGLIST16, AVR32_V1),
++ SYNTAX_NORMALM2(STM_PU, STM, STM_PU, INTREG_PREDEC, REGLIST16, AVR32_V1),
++ SYNTAX_NORMAL_CM2(STMTS, STMTS, STMTS, STMTS_PU, INTREG, REGLIST16, AVR32_V1),
++ SYNTAX_NORMALM2(STMTS_PU, STMTS, STMTS_PU, INTREG_PREDEC, REGLIST16, AVR32_V1),
++ SYNTAX_NORMAL2(STSWP_H, STSWP_H, STSWP_H, INTREG_SDISP_H, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(STSWP_W, STSWP_W, STSWP_W, INTREG_SDISP_W, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUB1, SUB, SUB1, SUB2, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL_C3(SUB2, SUB, SUB2, SUB5, INTREG, INTREG, INTREG_LSL, AVR32_V1),
++ SYNTAX_NORMAL_C3(SUB5, SUB, SUB5, SUB3_SP, INTREG, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUB3_SP, SUB, SUB3_SP, SUB3, SP, SIGNED_CONST_W, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUB3, SUB, SUB3, SUB4, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUB4, SUB, SUB4, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBEQ, SUBEQ, SUBEQ, SUB2EQ, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBNE, SUBNE, SUBNE, SUB2NE, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBCC, SUBCC, SUBCC, SUB2CC, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBCS, SUBCS, SUBCS, SUB2CS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBGE, SUBGE, SUBGE, SUB2GE, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBLT, SUBLT, SUBLT, SUB2LT, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBMI, SUBMI, SUBMI, SUB2MI, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBPL, SUBPL, SUBPL, SUB2PL, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBLS, SUBLS, SUBLS, SUB2LS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBGT, SUBGT, SUBGT, SUB2GT, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBLE, SUBLE, SUBLE, SUB2LE, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBHI, SUBHI, SUBHI, SUB2HI, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBVS, SUBVS, SUBVS, SUB2VS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBVC, SUBVC, SUBVC, SUB2VC, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBQS, SUBQS, SUBQS, SUB2QS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBAL, SUBAL, SUBAL, SUB2AL, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBHS, SUBHS, SUBCC, SUB2CC, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL_C2(SUBLO, SUBLO, SUBCS, SUB2CS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFEQ, SUBFEQ, SUBFEQ, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFNE, SUBFNE, SUBFNE, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFCC, SUBFCC, SUBFCC, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFCS, SUBFCS, SUBFCS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFGE, SUBFGE, SUBFGE, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFLT, SUBFLT, SUBFLT, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFMI, SUBFMI, SUBFMI, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFPL, SUBFPL, SUBFPL, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFLS, SUBFLS, SUBFLS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFGT, SUBFGT, SUBFGT, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFLE, SUBFLE, SUBFLE, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFHI, SUBFHI, SUBFHI, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFVS, SUBFVS, SUBFVS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFVC, SUBFVC, SUBFVC, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFQS, SUBFQS, SUBFQS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFAL, SUBFAL, SUBFAL, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFHS, SUBFHS, SUBFCC, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(SUBFLO, SUBFLO, SUBFCS, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL3(SUBHH_W, SUBHH_W, SUBHH_W, INTREG, INTREG_HSEL, INTREG_HSEL, AVR32_DSP),
++ SYNTAX_NORMAL1(SWAP_B, SWAP_B, SWAP_B, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SWAP_BH, SWAP_BH, SWAP_BH, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SWAP_H, SWAP_H, SWAP_H, INTREG, AVR32_V1),
++ SYNTAX_NORMAL1(SYNC, SYNC, SYNC, UNSIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL0(TLBR, TLBR, TLBR, AVR32_V1),
++ SYNTAX_NORMAL0(TLBS, TLBS, TLBS, AVR32_V1),
++ SYNTAX_NORMAL0(TLBW, TLBW, TLBW, AVR32_V1),
++ SYNTAX_NORMAL1(TNBZ, TNBZ, TNBZ, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(TST, TST, TST, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL3(XCHG, XCHG, XCHG, INTREG, INTREG, INTREG, AVR32_V1),
++ SYNTAX_NORMAL2(MEMC, MEMC, MEMC, SIGNED_CONST_W, UNSIGNED_NUMBER, AVR32_RMW),
++ SYNTAX_NORMAL2(MEMS, MEMS, MEMS, SIGNED_CONST_W, UNSIGNED_NUMBER, AVR32_RMW),
++ SYNTAX_NORMAL2(MEMT, MEMT, MEMT, SIGNED_CONST_W, UNSIGNED_NUMBER, AVR32_RMW),
++ SYNTAX_NORMAL4 (FMAC_S, FMAC_S, FMAC_S, INTREG, INTREG, INTREG, INTREG,
++ AVR32_V3FP),
++ SYNTAX_NORMAL4 (FNMAC_S, FNMAC_S, FNMAC_S, INTREG, INTREG, INTREG, INTREG,
++ AVR32_V3FP),
++ SYNTAX_NORMAL4 (FMSC_S, FMSC_S, FMSC_S, INTREG, INTREG, INTREG, INTREG,
++ AVR32_V3FP),
++ SYNTAX_NORMAL4 (FNMSC_S, FNMSC_S, FNMSC_S, INTREG, INTREG, INTREG, INTREG,
++ AVR32_V3FP),
++ SYNTAX_NORMAL3 (FMUL_S, FMUL_S, FMUL_S, INTREG, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL3 (FNMUL_S, FNMUL_S, FNMUL_S, INTREG, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL3 (FADD_S, FADD_S, FADD_S, INTREG, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL3 (FSUB_S, FSUB_S, FSUB_S, INTREG, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FCASTRS_SW, FCASTRS_SW, FCASTRS_SW, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FCASTRS_UW, FCASTRS_UW, FCASTRS_UW, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FCASTSW_S, FCASTSW_S, FCASTSW_S, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FCASTUW_S, FCASTUW_S, FCASTUW_S, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FCMP_S, FCMP_S, FCMP_S, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL1 (FCHK_S, FCHK_S, FCHK_S, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FRCPA_S, FRCPA_S, FRCPA_S, INTREG, INTREG, AVR32_V3FP),
++ SYNTAX_NORMAL2 (FRSQRTA_S, FRSQRTA_S, FRSQRTA_S, INTREG, INTREG, AVR32_V3FP),
++ {
++ AVR32_SYNTAX_LDA_W,
++ AVR32_V1, NULL, AVR32_PARSER_LDA,
++ { NULL }, NULL,
++ 2,
++ {
++ AVR32_OPERAND_INTREG,
++ AVR32_OPERAND_SIGNED_CONST,
++ },
++ },
++ {
++ AVR32_SYNTAX_CALL,
++ AVR32_V1, NULL, AVR32_PARSER_CALL,
++ { NULL }, NULL,
++ 1,
++ {
++ AVR32_OPERAND_JMPLABEL,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMAC0,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMAC0] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSVMAC1], 4,
++ {
++ AVR32_OPERAND_PICO_OUT0,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMAC1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMAC1] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSVMAC2], 4,
++ {
++ AVR32_OPERAND_PICO_OUT1,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMAC2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMAC2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSVMAC3], 4,
++ {
++ AVR32_OPERAND_PICO_OUT2,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMAC3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMAC3] },
++ NULL, 4,
++ {
++ AVR32_OPERAND_PICO_OUT3,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMUL0,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMUL0] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSVMUL1], 4,
++ {
++ AVR32_OPERAND_PICO_OUT0,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMUL1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMUL1] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSVMUL2], 4,
++ {
++ AVR32_OPERAND_PICO_OUT1,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMUL2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMUL2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSVMUL3], 4,
++ {
++ AVR32_OPERAND_PICO_OUT2,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSVMUL3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSVMUL3] },
++ NULL, 4,
++ {
++ AVR32_OPERAND_PICO_OUT3,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMAC0,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMAC0] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOVMAC1], 4,
++ {
++ AVR32_OPERAND_PICO_OUT0,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMAC1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMAC1] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOVMAC2], 4,
++ {
++ AVR32_OPERAND_PICO_OUT1,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMAC2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMAC2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOVMAC3], 4,
++ {
++ AVR32_OPERAND_PICO_OUT2,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMAC3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMAC], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMAC3] },
++ NULL, 4,
++ {
++ AVR32_OPERAND_PICO_OUT3,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMUL0,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMUL0] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOVMUL1], 4,
++ {
++ AVR32_OPERAND_PICO_OUT0,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMUL1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMUL1] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOVMUL2], 4,
++ {
++ AVR32_OPERAND_PICO_OUT1,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMUL2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMUL2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOVMUL3], 4,
++ {
++ AVR32_OPERAND_PICO_OUT2,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOVMUL3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOVMUL], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOVMUL3] },
++ NULL, 4,
++ {
++ AVR32_OPERAND_PICO_OUT3,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_IN,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLD_D2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLD_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLD_D2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOLD_D3], 2,
++ {
++ AVR32_OPERAND_PICO_REG_D,
++ AVR32_OPERAND_INTREG_PREDEC,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLD_D3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLD_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLD_D3] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOLD_D1], 2,
++ {
++ AVR32_OPERAND_PICO_REG_D,
++ AVR32_OPERAND_INTREG_INDEX,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLD_D1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLD_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLD_D1] },
++ NULL, 2,
++ {
++ AVR32_OPERAND_PICO_REG_D,
++ AVR32_OPERAND_INTREG_UDISP_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLD_W2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLD_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLD_W2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOLD_W3], 2,
++ {
++ AVR32_OPERAND_PICO_REG_W,
++ AVR32_OPERAND_INTREG_PREDEC,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLD_W3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLD_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLD_W3] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOLD_W1], 2,
++ {
++ AVR32_OPERAND_PICO_REG_W,
++ AVR32_OPERAND_INTREG_INDEX,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLD_W1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLD_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLD_W1] },
++ NULL, 2,
++ {
++ AVR32_OPERAND_PICO_REG_W,
++ AVR32_OPERAND_INTREG_UDISP_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLDM_D,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLDM_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLDM_D] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOLDM_D_PU], -2,
++ {
++ AVR32_OPERAND_INTREG,
++ AVR32_OPERAND_PICO_REGLIST_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLDM_D_PU,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLDM_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLDM_D_PU] },
++ NULL, -2,
++ {
++ AVR32_OPERAND_INTREG_POSTINC,
++ AVR32_OPERAND_PICO_REGLIST_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLDM_W,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLDM_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLDM_W] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOLDM_W_PU], -2,
++ {
++ AVR32_OPERAND_INTREG,
++ AVR32_OPERAND_PICO_REGLIST_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOLDM_W_PU,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOLDM_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOLDM_W_PU] },
++ NULL, -2,
++ {
++ AVR32_OPERAND_INTREG_POSTINC,
++ AVR32_OPERAND_PICO_REGLIST_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOMV_D1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOMV_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOMV_D1] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOMV_D2], 2,
++ {
++ AVR32_OPERAND_DWREG,
++ AVR32_OPERAND_PICO_REG_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOMV_D2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOMV_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOMV_D2] },
++ NULL, 2,
++ {
++ AVR32_OPERAND_PICO_REG_D,
++ AVR32_OPERAND_DWREG,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOMV_W1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOMV_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOMV_W1] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOMV_W2], 2,
++ {
++ AVR32_OPERAND_INTREG,
++ AVR32_OPERAND_PICO_REG_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOMV_W2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOMV_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOMV_W2] },
++ NULL, 2,
++ {
++ AVR32_OPERAND_PICO_REG_W,
++ AVR32_OPERAND_INTREG,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOST_D2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOST_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOST_D2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOST_D3], 2,
++ {
++ AVR32_OPERAND_INTREG_POSTINC,
++ AVR32_OPERAND_PICO_REG_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOST_D3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOST_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOST_D3] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOST_D1], 2,
++ {
++ AVR32_OPERAND_INTREG_INDEX,
++ AVR32_OPERAND_PICO_REG_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOST_D1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOST_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOST_D1] },
++ NULL, 2,
++ {
++ AVR32_OPERAND_INTREG_UDISP_W,
++ AVR32_OPERAND_PICO_REG_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOST_W2,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOST_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOST_W2] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOST_W3], 2,
++ {
++ AVR32_OPERAND_INTREG_POSTINC,
++ AVR32_OPERAND_PICO_REG_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOST_W3,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOST_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOST_W3] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOST_W1], 2,
++ {
++ AVR32_OPERAND_INTREG_INDEX,
++ AVR32_OPERAND_PICO_REG_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOST_W1,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOST_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOST_W1] },
++ NULL, 2,
++ {
++ AVR32_OPERAND_INTREG_UDISP_W,
++ AVR32_OPERAND_PICO_REG_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSTM_D,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSTM_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSTM_D] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSTM_D_PU], -2,
++ {
++ AVR32_OPERAND_INTREG,
++ AVR32_OPERAND_PICO_REGLIST_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSTM_D_PU,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSTM_D], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSTM_D_PU] },
++ NULL, -2,
++ {
++ AVR32_OPERAND_INTREG_PREDEC,
++ AVR32_OPERAND_PICO_REGLIST_D,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSTM_W,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSTM_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSTM_W] },
++ &avr32_syntax_table[AVR32_SYNTAX_PICOSTM_W_PU], -2,
++ {
++ AVR32_OPERAND_INTREG,
++ AVR32_OPERAND_PICO_REGLIST_W,
++ },
++ },
++ {
++ AVR32_SYNTAX_PICOSTM_W_PU,
++ AVR32_PICO, &avr32_mnemonic_table[AVR32_MNEMONIC_PICOSTM_W], AVR32_PARSER_ALIAS,
++ { .alias = &avr32_alias_table[AVR32_ALIAS_PICOSTM_W_PU] },
++ NULL, -2,
++ {
++ AVR32_OPERAND_INTREG_PREDEC,
++ AVR32_OPERAND_PICO_REGLIST_W,
++ },
++ },
++ SYNTAX_NORMAL2(RSUBEQ, RSUBEQ, RSUBEQ, INTREG, SIGNED_CONST, AVR32_V1),
++ SYNTAX_NORMAL2(RSUBNE, RSUBNE, RSUBNE, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBCC, RSUBCC, RSUBCC, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBCS, RSUBCS, RSUBCS, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBGE, RSUBGE, RSUBGE, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBLT, RSUBLT, RSUBLT, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBMI, RSUBMI, RSUBMI, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBPL, RSUBPL, RSUBPL, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBLS, RSUBLS, RSUBLS, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBGT, RSUBGT, RSUBGT, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBLE, RSUBLE, RSUBLE, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBHI, RSUBHI, RSUBHI, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBVS, RSUBVS, RSUBVS, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBVC, RSUBVC, RSUBVC, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBQS, RSUBQS, RSUBQS, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBAL, RSUBAL, RSUBAL, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBHS, RSUBHS, RSUBCC, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL2(RSUBLO, RSUBLO, RSUBCS, INTREG, SIGNED_CONST, AVR32_V2),
++ SYNTAX_NORMAL3(ADDEQ, ADDEQ, ADDEQ, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDNE, ADDNE, ADDNE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDCC, ADDCC, ADDCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDCS, ADDCS, ADDCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDGE, ADDGE, ADDGE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDLT, ADDLT, ADDLT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDMI, ADDMI, ADDMI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDPL, ADDPL, ADDPL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDLS, ADDLS, ADDLS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDGT, ADDGT, ADDGT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDLE, ADDLE, ADDLE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDHI, ADDHI, ADDHI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDVS, ADDVS, ADDVS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDVC, ADDVC, ADDVC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDQS, ADDQS, ADDQS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDAL, ADDAL, ADDAL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDHS, ADDHS, ADDCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ADDLO, ADDLO, ADDCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2EQ, SUBEQ, SUB2EQ, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2NE, SUBNE, SUB2NE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2CC, SUBCC, SUB2CC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2CS, SUBCS, SUB2CS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2GE, SUBGE, SUB2GE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2LT, SUBLT, SUB2LT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2MI, SUBMI, SUB2MI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2PL, SUBPL, SUB2PL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2LS, SUBLS, SUB2LS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2GT, SUBGT, SUB2GT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2LE, SUBLE, SUB2LE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2HI, SUBHI, SUB2HI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2VS, SUBVS, SUB2VS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2VC, SUBVC, SUB2VC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2QS, SUBQS, SUB2QS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2AL, SUBAL, SUB2AL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2HS, SUBHS, SUB2CC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(SUB2LO, SUBLO, SUB2CS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDEQ, ANDEQ, ANDEQ, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDNE, ANDNE, ANDNE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDCC, ANDCC, ANDCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDCS, ANDCS, ANDCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDGE, ANDGE, ANDGE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDLT, ANDLT, ANDLT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDMI, ANDMI, ANDMI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDPL, ANDPL, ANDPL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDLS, ANDLS, ANDLS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDGT, ANDGT, ANDGT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDLE, ANDLE, ANDLE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDHI, ANDHI, ANDHI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDVS, ANDVS, ANDVS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDVC, ANDVC, ANDVC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDQS, ANDQS, ANDQS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDAL, ANDAL, ANDAL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDHS, ANDHS, ANDCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ANDLO, ANDLO, ANDCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(OREQ, OREQ, OREQ, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORNE, ORNE, ORNE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORCC, ORCC, ORCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORCS, ORCS, ORCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORGE, ORGE, ORGE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORLT, ORLT, ORLT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORMI, ORMI, ORMI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORPL, ORPL, ORPL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORLS, ORLS, ORLS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORGT, ORGT, ORGT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORLE, ORLE, ORLE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORHI, ORHI, ORHI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORVS, ORVS, ORVS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORVC, ORVC, ORVC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORQS, ORQS, ORQS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORAL, ORAL, ORAL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORHS, ORHS, ORCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(ORLO, ORLO, ORCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EOREQ, EOREQ, EOREQ, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORNE, EORNE, EORNE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORCC, EORCC, EORCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORCS, EORCS, EORCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORGE, EORGE, EORGE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORLT, EORLT, EORLT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORMI, EORMI, EORMI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORPL, EORPL, EORPL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORLS, EORLS, EORLS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORGT, EORGT, EORGT, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORLE, EORLE, EORLE, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORHI, EORHI, EORHI, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORVS, EORVS, EORVS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORVC, EORVC, EORVC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORQS, EORQS, EORQS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORAL, EORAL, EORAL, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORHS, EORHS, EORCC, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL3(EORLO, EORLO, EORCS, INTREG, INTREG, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WEQ, LD_WEQ, LD_WEQ, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WNE, LD_WNE, LD_WNE, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WCC, LD_WCC, LD_WCC, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WCS, LD_WCS, LD_WCS, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WGE, LD_WGE, LD_WGE, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WLT, LD_WLT, LD_WLT, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WMI, LD_WMI, LD_WMI, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WPL, LD_WPL, LD_WPL, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WLS, LD_WLS, LD_WLS, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WGT, LD_WGT, LD_WGT, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WLE, LD_WLE, LD_WLE, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WHI, LD_WHI, LD_WHI, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WVS, LD_WVS, LD_WVS, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WVC, LD_WVC, LD_WVC, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WQS, LD_WQS, LD_WQS, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WAL, LD_WAL, LD_WAL, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WHS, LD_WHS, LD_WCC, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_WLO, LD_WLO, LD_WCS, INTREG, INTREG_UDISP_W, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHEQ, LD_SHEQ, LD_SHEQ, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHNE, LD_SHNE, LD_SHNE, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHCC, LD_SHCC, LD_SHCC, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHCS, LD_SHCS, LD_SHCS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHGE, LD_SHGE, LD_SHGE, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHLT, LD_SHLT, LD_SHLT, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHMI, LD_SHMI, LD_SHMI, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHPL, LD_SHPL, LD_SHPL, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHLS, LD_SHLS, LD_SHLS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHGT, LD_SHGT, LD_SHGT, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHLE, LD_SHLE, LD_SHLE, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHHI, LD_SHHI, LD_SHHI, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHVS, LD_SHVS, LD_SHVS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHVC, LD_SHVC, LD_SHVC, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHQS, LD_SHQS, LD_SHQS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHAL, LD_SHAL, LD_SHAL, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHHS, LD_SHHS, LD_SHCC, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SHLO, LD_SHLO, LD_SHCS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHEQ, LD_UHEQ, LD_UHEQ, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHNE, LD_UHNE, LD_UHNE, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHCC, LD_UHCC, LD_UHCC, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHCS, LD_UHCS, LD_UHCS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHGE, LD_UHGE, LD_UHGE, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHLT, LD_UHLT, LD_UHLT, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHMI, LD_UHMI, LD_UHMI, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHPL, LD_UHPL, LD_UHPL, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHLS, LD_UHLS, LD_UHLS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHGT, LD_UHGT, LD_UHGT, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHLE, LD_UHLE, LD_UHLE, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHHI, LD_UHHI, LD_UHHI, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHVS, LD_UHVS, LD_UHVS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHVC, LD_UHVC, LD_UHVC, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHQS, LD_UHQS, LD_UHQS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHAL, LD_UHAL, LD_UHAL, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHHS, LD_UHHS, LD_UHCC, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UHLO, LD_UHLO, LD_UHCS, INTREG, INTREG_UDISP_H, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBEQ, LD_SBEQ, LD_SBEQ, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBNE, LD_SBNE, LD_SBNE, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBCC, LD_SBCC, LD_SBCC, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBCS, LD_SBCS, LD_SBCS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBGE, LD_SBGE, LD_SBGE, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBLT, LD_SBLT, LD_SBLT, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBMI, LD_SBMI, LD_SBMI, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBPL, LD_SBPL, LD_SBPL, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBLS, LD_SBLS, LD_SBLS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBGT, LD_SBGT, LD_SBGT, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBLE, LD_SBLE, LD_SBLE, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBHI, LD_SBHI, LD_SBHI, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBVS, LD_SBVS, LD_SBVS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBVC, LD_SBVC, LD_SBVC, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBQS, LD_SBQS, LD_SBQS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBAL, LD_SBAL, LD_SBAL, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBHS, LD_SBHS, LD_SBCC, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_SBLO, LD_SBLO, LD_SBCS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBEQ, LD_UBEQ, LD_UBEQ, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBNE, LD_UBNE, LD_UBNE, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBCC, LD_UBCC, LD_UBCC, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBCS, LD_UBCS, LD_UBCS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBGE, LD_UBGE, LD_UBGE, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBLT, LD_UBLT, LD_UBLT, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBMI, LD_UBMI, LD_UBMI, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBPL, LD_UBPL, LD_UBPL, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBLS, LD_UBLS, LD_UBLS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBGT, LD_UBGT, LD_UBGT, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBLE, LD_UBLE, LD_UBLE, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBHI, LD_UBHI, LD_UBHI, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBVS, LD_UBVS, LD_UBVS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBVC, LD_UBVC, LD_UBVC, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBQS, LD_UBQS, LD_UBQS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBAL, LD_UBAL, LD_UBAL, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBHS, LD_UBHS, LD_UBCC, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(LD_UBLO, LD_UBLO, LD_UBCS, INTREG, INTREG_UDISP, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WEQ, ST_WEQ, ST_WEQ, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WNE, ST_WNE, ST_WNE, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WCC, ST_WCC, ST_WCC, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WCS, ST_WCS, ST_WCS, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WGE, ST_WGE, ST_WGE, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WLT, ST_WLT, ST_WLT, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WMI, ST_WMI, ST_WMI, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WPL, ST_WPL, ST_WPL, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WLS, ST_WLS, ST_WLS, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WGT, ST_WGT, ST_WGT, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WLE, ST_WLE, ST_WLE, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WHI, ST_WHI, ST_WHI, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WVS, ST_WVS, ST_WVS, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WVC, ST_WVC, ST_WVC, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WQS, ST_WQS, ST_WQS, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WAL, ST_WAL, ST_WAL, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WHS, ST_WHS, ST_WCC, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_WLO, ST_WLO, ST_WCS, INTREG_UDISP_W, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HEQ, ST_HEQ, ST_HEQ, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HNE, ST_HNE, ST_HNE, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HCC, ST_HCC, ST_HCC, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HCS, ST_HCS, ST_HCS, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HGE, ST_HGE, ST_HGE, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HLT, ST_HLT, ST_HLT, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HMI, ST_HMI, ST_HMI, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HPL, ST_HPL, ST_HPL, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HLS, ST_HLS, ST_HLS, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HGT, ST_HGT, ST_HGT, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HLE, ST_HLE, ST_HLE, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HHI, ST_HHI, ST_HHI, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HVS, ST_HVS, ST_HVS, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HVC, ST_HVC, ST_HVC, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HQS, ST_HQS, ST_HQS, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HAL, ST_HAL, ST_HAL, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HHS, ST_HHS, ST_HCC, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_HLO, ST_HLO, ST_HCS, INTREG_UDISP_H, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BEQ, ST_BEQ, ST_BEQ, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BNE, ST_BNE, ST_BNE, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BCC, ST_BCC, ST_BCC, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BCS, ST_BCS, ST_BCS, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BGE, ST_BGE, ST_BGE, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BLT, ST_BLT, ST_BLT, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BMI, ST_BMI, ST_BMI, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BPL, ST_BPL, ST_BPL, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BLS, ST_BLS, ST_BLS, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BGT, ST_BGT, ST_BGT, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BLE, ST_BLE, ST_BLE, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BHI, ST_BHI, ST_BHI, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BVS, ST_BVS, ST_BVS, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BVC, ST_BVC, ST_BVC, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BQS, ST_BQS, ST_BQS, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BAL, ST_BAL, ST_BAL, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BHS, ST_BHS, ST_BCC, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(ST_BLO, ST_BLO, ST_BCS, INTREG_UDISP, INTREG, AVR32_V2),
++ SYNTAX_NORMAL2(MOVH, MOVH, MOVH, INTREG, UNSIGNED_CONST, AVR32_V2),
++
++ };
++
++#define NORMAL_MNEMONIC(name, syntax, str) \
++ { \
++ AVR32_MNEMONIC_##name, str, \
++ &avr32_syntax_table[AVR32_SYNTAX_##syntax], \
++ }
++#define FP_MNEMONIC(name, syntax, str) \
++ NORMAL_MNEMONIC(name##_S, syntax##_S, str ".s"), \
++ NORMAL_MNEMONIC(name##_D, syntax##_D, str ".d")
++
++const struct avr32_mnemonic avr32_mnemonic_table[] =
++ {
++ NORMAL_MNEMONIC(ABS, ABS, "abs"),
++ NORMAL_MNEMONIC(ACALL, ACALL, "acall"),
++ NORMAL_MNEMONIC(ACR, ACR, "acr"),
++ NORMAL_MNEMONIC(ADC, ADC, "adc"),
++ NORMAL_MNEMONIC(ADD, ADD1, "add"),
++ NORMAL_MNEMONIC(ADDABS, ADDABS, "addabs"),
++ NORMAL_MNEMONIC(ADDHH_W, ADDHH_W, "addhh.w"),
++ NORMAL_MNEMONIC(AND, AND1, "and"),
++ NORMAL_MNEMONIC(ANDH, ANDH, "andh"),
++ NORMAL_MNEMONIC(ANDL, ANDL, "andl"),
++ NORMAL_MNEMONIC(ANDN, ANDN, "andn"),
++ NORMAL_MNEMONIC(ASR, ASR1, "asr"),
++ NORMAL_MNEMONIC(BFEXTS, BFEXTS, "bfexts"),
++ NORMAL_MNEMONIC(BFEXTU, BFEXTU, "bfextu"),
++ NORMAL_MNEMONIC(BFINS, BFINS, "bfins"),
++ NORMAL_MNEMONIC(BLD, BLD, "bld"),
++ NORMAL_MNEMONIC(BREQ, BREQ1, "breq"),
++ NORMAL_MNEMONIC(BRNE, BRNE1, "brne"),
++ NORMAL_MNEMONIC(BRCC, BRCC1, "brcc"),
++ NORMAL_MNEMONIC(BRCS, BRCS1, "brcs"),
++ NORMAL_MNEMONIC(BRGE, BRGE1, "brge"),
++ NORMAL_MNEMONIC(BRLT, BRLT1, "brlt"),
++ NORMAL_MNEMONIC(BRMI, BRMI1, "brmi"),
++ NORMAL_MNEMONIC(BRPL, BRPL1, "brpl"),
++ NORMAL_MNEMONIC(BRHS, BRHS1, "brhs"),
++ NORMAL_MNEMONIC(BRLO, BRLO1, "brlo"),
++ NORMAL_MNEMONIC(BRLS, BRLS, "brls"),
++ NORMAL_MNEMONIC(BRGT, BRGT, "brgt"),
++ NORMAL_MNEMONIC(BRLE, BRLE, "brle"),
++ NORMAL_MNEMONIC(BRHI, BRHI, "brhi"),
++ NORMAL_MNEMONIC(BRVS, BRVS, "brvs"),
++ NORMAL_MNEMONIC(BRVC, BRVC, "brvc"),
++ NORMAL_MNEMONIC(BRQS, BRQS, "brqs"),
++ NORMAL_MNEMONIC(BRAL, BRAL, "bral"),
++ NORMAL_MNEMONIC(BREAKPOINT, BREAKPOINT, "breakpoint"),
++ NORMAL_MNEMONIC(BREV, BREV, "brev"),
++ NORMAL_MNEMONIC(BST, BST, "bst"),
++ NORMAL_MNEMONIC(CACHE, CACHE, "cache"),
++ NORMAL_MNEMONIC(CASTS_B, CASTS_B, "casts.b"),
++ NORMAL_MNEMONIC(CASTS_H, CASTS_H, "casts.h"),
++ NORMAL_MNEMONIC(CASTU_B, CASTU_B, "castu.b"),
++ NORMAL_MNEMONIC(CASTU_H, CASTU_H, "castu.h"),
++ NORMAL_MNEMONIC(CBR, CBR, "cbr"),
++ NORMAL_MNEMONIC(CLZ, CLZ, "clz"),
++ NORMAL_MNEMONIC(COM, COM, "com"),
++ NORMAL_MNEMONIC(COP, COP, "cop"),
++ NORMAL_MNEMONIC(CP_B, CP_B, "cp.b"),
++ NORMAL_MNEMONIC(CP_H, CP_H, "cp.h"),
++ NORMAL_MNEMONIC(CP_W, CP_W1, "cp.w"),
++ NORMAL_MNEMONIC(CP, CP_W1, "cp"),
++ NORMAL_MNEMONIC(CPC, CPC1, "cpc"),
++ NORMAL_MNEMONIC(CSRF, CSRF, "csrf"),
++ NORMAL_MNEMONIC(CSRFCZ, CSRFCZ, "csrfcz"),
++ NORMAL_MNEMONIC(DIVS, DIVS, "divs"),
++ NORMAL_MNEMONIC(DIVU, DIVU, "divu"),
++ NORMAL_MNEMONIC(EOR, EOR1, "eor"),
++ NORMAL_MNEMONIC(EORL, EORL, "eorl"),
++ NORMAL_MNEMONIC(EORH, EORH, "eorh"),
++ NORMAL_MNEMONIC(FRS, FRS, "frs"),
++ NORMAL_MNEMONIC(SSCALL, SSCALL, "sscall"),
++ NORMAL_MNEMONIC(RETSS, RETSS, "retss"),
++ NORMAL_MNEMONIC(ICALL, ICALL, "icall"),
++ NORMAL_MNEMONIC(INCJOSP, INCJOSP, "incjosp"),
++ NORMAL_MNEMONIC(LD_D, LD_D1, "ld.d"),
++ NORMAL_MNEMONIC(LD_SB, LD_SB2, "ld.sb"),
++ NORMAL_MNEMONIC(LD_UB, LD_UB1, "ld.ub"),
++ NORMAL_MNEMONIC(LD_SH, LD_SH1, "ld.sh"),
++ NORMAL_MNEMONIC(LD_UH, LD_UH1, "ld.uh"),
++ NORMAL_MNEMONIC(LD_W, LD_W1, "ld.w"),
++ NORMAL_MNEMONIC(LDC_D, LDC_D3, "ldc.d"),
++ NORMAL_MNEMONIC(LDC_W, LDC_W3, "ldc.w"),
++ NORMAL_MNEMONIC(LDC0_D, LDC0_D, "ldc0.d"),
++ NORMAL_MNEMONIC(LDC0_W, LDC0_W, "ldc0.w"),
++ NORMAL_MNEMONIC(LDCM_D, LDCM_D, "ldcm.d"),
++ NORMAL_MNEMONIC(LDCM_W, LDCM_W, "ldcm.w"),
++ NORMAL_MNEMONIC(LDDPC, LDDPC, "lddpc"),
++ NORMAL_MNEMONIC(LDDSP, LDDSP, "lddsp"),
++ NORMAL_MNEMONIC(LDINS_B, LDINS_B, "ldins.b"),
++ NORMAL_MNEMONIC(LDINS_H, LDINS_H, "ldins.h"),
++ NORMAL_MNEMONIC(LDM, LDM, "ldm"),
++ NORMAL_MNEMONIC(LDMTS, LDMTS, "ldmts"),
++ NORMAL_MNEMONIC(LDSWP_SH, LDSWP_SH, "ldswp.sh"),
++ NORMAL_MNEMONIC(LDSWP_UH, LDSWP_UH, "ldswp.uh"),
++ NORMAL_MNEMONIC(LDSWP_W, LDSWP_W, "ldswp.w"),
++ NORMAL_MNEMONIC(LSL, LSL1, "lsl"),
++ NORMAL_MNEMONIC(LSR, LSR1, "lsr"),
++ NORMAL_MNEMONIC(MAC, MAC, "mac"),
++ NORMAL_MNEMONIC(MACHH_D, MACHH_D, "machh.d"),
++ NORMAL_MNEMONIC(MACHH_W, MACHH_W, "machh.w"),
++ NORMAL_MNEMONIC(MACS_D, MACS_D, "macs.d"),
++ NORMAL_MNEMONIC(MACSATHH_W, MACSATHH_W, "macsathh.w"),
++ NORMAL_MNEMONIC(MACU_D, MACUD, "macu.d"),
++ NORMAL_MNEMONIC(MACWH_D, MACWH_D, "macwh.d"),
++ NORMAL_MNEMONIC(MAX, MAX, "max"),
++ NORMAL_MNEMONIC(MCALL, MCALL, "mcall"),
++ NORMAL_MNEMONIC(MFDR, MFDR, "mfdr"),
++ NORMAL_MNEMONIC(MFSR, MFSR, "mfsr"),
++ NORMAL_MNEMONIC(MIN, MIN, "min"),
++ NORMAL_MNEMONIC(MOV, MOV3, "mov"),
++ NORMAL_MNEMONIC(MOVEQ, MOVEQ1, "moveq"),
++ NORMAL_MNEMONIC(MOVNE, MOVNE1, "movne"),
++ NORMAL_MNEMONIC(MOVCC, MOVCC1, "movcc"),
++ NORMAL_MNEMONIC(MOVCS, MOVCS1, "movcs"),
++ NORMAL_MNEMONIC(MOVGE, MOVGE1, "movge"),
++ NORMAL_MNEMONIC(MOVLT, MOVLT1, "movlt"),
++ NORMAL_MNEMONIC(MOVMI, MOVMI1, "movmi"),
++ NORMAL_MNEMONIC(MOVPL, MOVPL1, "movpl"),
++ NORMAL_MNEMONIC(MOVLS, MOVLS1, "movls"),
++ NORMAL_MNEMONIC(MOVGT, MOVGT1, "movgt"),
++ NORMAL_MNEMONIC(MOVLE, MOVLE1, "movle"),
++ NORMAL_MNEMONIC(MOVHI, MOVHI1, "movhi"),
++ NORMAL_MNEMONIC(MOVVS, MOVVS1, "movvs"),
++ NORMAL_MNEMONIC(MOVVC, MOVVC1, "movvc"),
++ NORMAL_MNEMONIC(MOVQS, MOVQS1, "movqs"),
++ NORMAL_MNEMONIC(MOVAL, MOVAL1, "moval"),
++ NORMAL_MNEMONIC(MOVHS, MOVHS1, "movhs"),
++ NORMAL_MNEMONIC(MOVLO, MOVLO1, "movlo"),
++ NORMAL_MNEMONIC(MTDR, MTDR, "mtdr"),
++ NORMAL_MNEMONIC(MTSR, MTSR, "mtsr"),
++ NORMAL_MNEMONIC(MUL, MUL1, "mul"),
++ NORMAL_MNEMONIC(MULHH_W, MULHH_W, "mulhh.w"),
++ NORMAL_MNEMONIC(MULNHH_W, MULNHH_W, "mulnhh.w"),
++ NORMAL_MNEMONIC(MULNWH_D, MULNWH_D, "mulnwh.d"),
++ NORMAL_MNEMONIC(MULS_D, MULSD, "muls.d"),
++ NORMAL_MNEMONIC(MULSATHH_H, MULSATHH_H, "mulsathh.h"),
++ NORMAL_MNEMONIC(MULSATHH_W, MULSATHH_W, "mulsathh.w"),
++ NORMAL_MNEMONIC(MULSATRNDHH_H, MULSATRNDHH_H, "mulsatrndhh.h"),
++ NORMAL_MNEMONIC(MULSATRNDWH_W, MULSATRNDWH_W, "mulsatrndwh.w"),
++ NORMAL_MNEMONIC(MULSATWH_W, MULSATWH_W, "mulsatwh.w"),
++ NORMAL_MNEMONIC(MULU_D, MULU_D, "mulu.d"),
++ NORMAL_MNEMONIC(MULWH_D, MULWH_D, "mulwh.d"),
++ NORMAL_MNEMONIC(MUSFR, MUSFR, "musfr"),
++ NORMAL_MNEMONIC(MUSTR, MUSTR, "mustr"),
++ NORMAL_MNEMONIC(MVCR_D, MVCR_D, "mvcr.d"),
++ NORMAL_MNEMONIC(MVCR_W, MVCR_W, "mvcr.w"),
++ NORMAL_MNEMONIC(MVRC_D, MVRC_D, "mvrc.d"),
++ NORMAL_MNEMONIC(MVRC_W, MVRC_W, "mvrc.w"),
++ NORMAL_MNEMONIC(NEG, NEG, "neg"),
++ NORMAL_MNEMONIC(NOP, NOP, "nop"),
++ NORMAL_MNEMONIC(OR, OR1, "or"),
++ NORMAL_MNEMONIC(ORH, ORH, "orh"),
++ NORMAL_MNEMONIC(ORL, ORL, "orl"),
++ NORMAL_MNEMONIC(PABS_SB, PABS_SB, "pabs.sb"),
++ NORMAL_MNEMONIC(PABS_SH, PABS_SH, "pabs.sh"),
++ NORMAL_MNEMONIC(PACKSH_SB, PACKSH_SB, "packsh.sb"),
++ NORMAL_MNEMONIC(PACKSH_UB, PACKSH_UB, "packsh.ub"),
++ NORMAL_MNEMONIC(PACKW_SH, PACKW_SH, "packw.sh"),
++ NORMAL_MNEMONIC(PADD_B, PADD_B, "padd.b"),
++ NORMAL_MNEMONIC(PADD_H, PADD_H, "padd.h"),
++ NORMAL_MNEMONIC(PADDH_SH, PADDH_SH, "paddh.sh"),
++ NORMAL_MNEMONIC(PADDH_UB, PADDH_UB, "paddh.ub"),
++ NORMAL_MNEMONIC(PADDS_SB, PADDS_SB, "padds.sb"),
++ NORMAL_MNEMONIC(PADDS_SH, PADDS_SH, "padds.sh"),
++ NORMAL_MNEMONIC(PADDS_UB, PADDS_UB, "padds.ub"),
++ NORMAL_MNEMONIC(PADDS_UH, PADDS_UH, "padds.uh"),
++ NORMAL_MNEMONIC(PADDSUB_H, PADDSUB_H, "paddsub.h"),
++ NORMAL_MNEMONIC(PADDSUBH_SH, PADDSUBH_SH, "paddsubh.sh"),
++ NORMAL_MNEMONIC(PADDSUBS_SH, PADDSUBS_SH, "paddsubs.sh"),
++ NORMAL_MNEMONIC(PADDSUBS_UH, PADDSUBS_UH, "paddsubs.uh"),
++ NORMAL_MNEMONIC(PADDX_H, PADDX_H, "paddx.h"),
++ NORMAL_MNEMONIC(PADDXH_SH, PADDXH_SH, "paddxh.sh"),
++ NORMAL_MNEMONIC(PADDXS_SH, PADDXS_SH, "paddxs.sh"),
++ NORMAL_MNEMONIC(PADDXS_UH, PADDXS_UH, "paddxs.uh"),
++ NORMAL_MNEMONIC(PASR_B, PASR_B, "pasr.b"),
++ NORMAL_MNEMONIC(PASR_H, PASR_H, "pasr.h"),
++ NORMAL_MNEMONIC(PAVG_SH, PAVG_SH, "pavg.sh"),
++ NORMAL_MNEMONIC(PAVG_UB, PAVG_UB, "pavg.ub"),
++ NORMAL_MNEMONIC(PLSL_B, PLSL_B, "plsl.b"),
++ NORMAL_MNEMONIC(PLSL_H, PLSL_H, "plsl.h"),
++ NORMAL_MNEMONIC(PLSR_B, PLSR_B, "plsr.b"),
++ NORMAL_MNEMONIC(PLSR_H, PLSR_H, "plsr.h"),
++ NORMAL_MNEMONIC(PMAX_SH, PMAX_SH, "pmax.sh"),
++ NORMAL_MNEMONIC(PMAX_UB, PMAX_UB, "pmax.ub"),
++ NORMAL_MNEMONIC(PMIN_SH, PMIN_SH, "pmin.sh"),
++ NORMAL_MNEMONIC(PMIN_UB, PMIN_UB, "pmin.ub"),
++ NORMAL_MNEMONIC(POPJC, POPJC, "popjc"),
++ NORMAL_MNEMONIC(POPM, POPM, "popm"),
++ NORMAL_MNEMONIC(PREF, PREF, "pref"),
++ NORMAL_MNEMONIC(PSAD, PSAD, "psad"),
++ NORMAL_MNEMONIC(PSUB_B, PSUB_B, "psub.b"),
++ NORMAL_MNEMONIC(PSUB_H, PSUB_H, "psub.h"),
++ NORMAL_MNEMONIC(PSUBADD_H, PSUBADD_H, "psubadd.h"),
++ NORMAL_MNEMONIC(PSUBADDH_SH, PSUBADDH_SH, "psubaddh.sh"),
++ NORMAL_MNEMONIC(PSUBADDS_SH, PSUBADDS_SH, "psubadds.sh"),
++ NORMAL_MNEMONIC(PSUBADDS_UH, PSUBADDS_UH, "psubadds.uh"),
++ NORMAL_MNEMONIC(PSUBH_SH, PSUBH_SH, "psubh.sh"),
++ NORMAL_MNEMONIC(PSUBH_UB, PSUBH_UB, "psubh.ub"),
++ NORMAL_MNEMONIC(PSUBS_SB, PSUBS_SB, "psubs.sb"),
++ NORMAL_MNEMONIC(PSUBS_SH, PSUBS_SH, "psubs.sh"),
++ NORMAL_MNEMONIC(PSUBS_UB, PSUBS_UB, "psubs.ub"),
++ NORMAL_MNEMONIC(PSUBS_UH, PSUBS_UH, "psubs.uh"),
++ NORMAL_MNEMONIC(PSUBX_H, PSUBX_H, "psubx.h"),
++ NORMAL_MNEMONIC(PSUBXH_SH, PSUBXH_SH, "psubxh.sh"),
++ NORMAL_MNEMONIC(PSUBXS_SH, PSUBXS_SH, "psubxs.sh"),
++ NORMAL_MNEMONIC(PSUBXS_UH, PSUBXS_UH, "psubxs.uh"),
++ NORMAL_MNEMONIC(PUNPCKSB_H, PUNPCKSB_H, "punpcksb.h"),
++ NORMAL_MNEMONIC(PUNPCKUB_H, PUNPCKUB_H, "punpckub.h"),
++ NORMAL_MNEMONIC(PUSHJC, PUSHJC, "pushjc"),
++ NORMAL_MNEMONIC(PUSHM, PUSHM, "pushm"),
++ NORMAL_MNEMONIC(RCALL, RCALL1, "rcall"),
++ NORMAL_MNEMONIC(RETEQ, RETEQ, "reteq"),
++ NORMAL_MNEMONIC(RETNE, RETNE, "retne"),
++ NORMAL_MNEMONIC(RETCC, RETCC, "retcc"),
++ NORMAL_MNEMONIC(RETCS, RETCS, "retcs"),
++ NORMAL_MNEMONIC(RETGE, RETGE, "retge"),
++ NORMAL_MNEMONIC(RETLT, RETLT, "retlt"),
++ NORMAL_MNEMONIC(RETMI, RETMI, "retmi"),
++ NORMAL_MNEMONIC(RETPL, RETPL, "retpl"),
++ NORMAL_MNEMONIC(RETLS, RETLS, "retls"),
++ NORMAL_MNEMONIC(RETGT, RETGT, "retgt"),
++ NORMAL_MNEMONIC(RETLE, RETLE, "retle"),
++ NORMAL_MNEMONIC(RETHI, RETHI, "rethi"),
++ NORMAL_MNEMONIC(RETVS, RETVS, "retvs"),
++ NORMAL_MNEMONIC(RETVC, RETVC, "retvc"),
++ NORMAL_MNEMONIC(RETQS, RETQS, "retqs"),
++ NORMAL_MNEMONIC(RETAL, RETAL, "retal"),
++ NORMAL_MNEMONIC(RETHS, RETHS, "reths"),
++ NORMAL_MNEMONIC(RETLO, RETLO, "retlo"),
++ NORMAL_MNEMONIC(RET, RETAL, "ret"),
++ NORMAL_MNEMONIC(RETD, RETD, "retd"),
++ NORMAL_MNEMONIC(RETE, RETE, "rete"),
++ NORMAL_MNEMONIC(RETJ, RETJ, "retj"),
++ NORMAL_MNEMONIC(RETS, RETS, "rets"),
++ NORMAL_MNEMONIC(RJMP, RJMP, "rjmp"),
++ NORMAL_MNEMONIC(ROL, ROL, "rol"),
++ NORMAL_MNEMONIC(ROR, ROR, "ror"),
++ NORMAL_MNEMONIC(RSUB, RSUB1, "rsub"),
++ NORMAL_MNEMONIC(SATADD_H, SATADD_H, "satadd.h"),
++ NORMAL_MNEMONIC(SATADD_W, SATADD_W, "satadd.w"),
++ NORMAL_MNEMONIC(SATRNDS, SATRNDS, "satrnds"),
++ NORMAL_MNEMONIC(SATRNDU, SATRNDU, "satrndu"),
++ NORMAL_MNEMONIC(SATS, SATS, "sats"),
++ NORMAL_MNEMONIC(SATSUB_H, SATSUB_H, "satsub.h"),
++ NORMAL_MNEMONIC(SATSUB_W, SATSUB_W1, "satsub.w"),
++ NORMAL_MNEMONIC(SATU, SATU, "satu"),
++ NORMAL_MNEMONIC(SBC, SBC, "sbc"),
++ NORMAL_MNEMONIC(SBR, SBR, "sbr"),
++ NORMAL_MNEMONIC(SCALL, SCALL, "scall"),
++ NORMAL_MNEMONIC(SCR, SCR, "scr"),
++ NORMAL_MNEMONIC(SLEEP, SLEEP, "sleep"),
++ NORMAL_MNEMONIC(SREQ, SREQ, "sreq"),
++ NORMAL_MNEMONIC(SRNE, SRNE, "srne"),
++ NORMAL_MNEMONIC(SRCC, SRCC, "srcc"),
++ NORMAL_MNEMONIC(SRCS, SRCS, "srcs"),
++ NORMAL_MNEMONIC(SRGE, SRGE, "srge"),
++ NORMAL_MNEMONIC(SRLT, SRLT, "srlt"),
++ NORMAL_MNEMONIC(SRMI, SRMI, "srmi"),
++ NORMAL_MNEMONIC(SRPL, SRPL, "srpl"),
++ NORMAL_MNEMONIC(SRLS, SRLS, "srls"),
++ NORMAL_MNEMONIC(SRGT, SRGT, "srgt"),
++ NORMAL_MNEMONIC(SRLE, SRLE, "srle"),
++ NORMAL_MNEMONIC(SRHI, SRHI, "srhi"),
++ NORMAL_MNEMONIC(SRVS, SRVS, "srvs"),
++ NORMAL_MNEMONIC(SRVC, SRVC, "srvc"),
++ NORMAL_MNEMONIC(SRQS, SRQS, "srqs"),
++ NORMAL_MNEMONIC(SRAL, SRAL, "sral"),
++ NORMAL_MNEMONIC(SRHS, SRHS, "srhs"),
++ NORMAL_MNEMONIC(SRLO, SRLO, "srlo"),
++ NORMAL_MNEMONIC(SSRF, SSRF, "ssrf"),
++ NORMAL_MNEMONIC(ST_B, ST_B1, "st.b"),
++ NORMAL_MNEMONIC(ST_D, ST_D1, "st.d"),
++ NORMAL_MNEMONIC(ST_H, ST_H1, "st.h"),
++ NORMAL_MNEMONIC(ST_W, ST_W1, "st.w"),
++ NORMAL_MNEMONIC(STC_D, STC_D3, "stc.d"),
++ NORMAL_MNEMONIC(STC_W, STC_W3, "stc.w"),
++ NORMAL_MNEMONIC(STC0_D, STC0_D, "stc0.d"),
++ NORMAL_MNEMONIC(STC0_W, STC0_W, "stc0.w"),
++ NORMAL_MNEMONIC(STCM_D, STCM_D, "stcm.d"),
++ NORMAL_MNEMONIC(STCM_W, STCM_W, "stcm.w"),
++ NORMAL_MNEMONIC(STCOND, STCOND, "stcond"),
++ NORMAL_MNEMONIC(STDSP, STDSP, "stdsp"),
++ NORMAL_MNEMONIC(STHH_W, STHH_W2, "sthh.w"),
++ NORMAL_MNEMONIC(STM, STM, "stm"),
++ NORMAL_MNEMONIC(STMTS, STMTS, "stmts"),
++ NORMAL_MNEMONIC(STSWP_H, STSWP_H, "stswp.h"),
++ NORMAL_MNEMONIC(STSWP_W, STSWP_W, "stswp.w"),
++ NORMAL_MNEMONIC(SUB, SUB1, "sub"),
++ NORMAL_MNEMONIC(SUBEQ, SUBEQ, "subeq"),
++ NORMAL_MNEMONIC(SUBNE, SUBNE, "subne"),
++ NORMAL_MNEMONIC(SUBCC, SUBCC, "subcc"),
++ NORMAL_MNEMONIC(SUBCS, SUBCS, "subcs"),
++ NORMAL_MNEMONIC(SUBGE, SUBGE, "subge"),
++ NORMAL_MNEMONIC(SUBLT, SUBLT, "sublt"),
++ NORMAL_MNEMONIC(SUBMI, SUBMI, "submi"),
++ NORMAL_MNEMONIC(SUBPL, SUBPL, "subpl"),
++ NORMAL_MNEMONIC(SUBLS, SUBLS, "subls"),
++ NORMAL_MNEMONIC(SUBGT, SUBGT, "subgt"),
++ NORMAL_MNEMONIC(SUBLE, SUBLE, "suble"),
++ NORMAL_MNEMONIC(SUBHI, SUBHI, "subhi"),
++ NORMAL_MNEMONIC(SUBVS, SUBVS, "subvs"),
++ NORMAL_MNEMONIC(SUBVC, SUBVC, "subvc"),
++ NORMAL_MNEMONIC(SUBQS, SUBQS, "subqs"),
++ NORMAL_MNEMONIC(SUBAL, SUBAL, "subal"),
++ NORMAL_MNEMONIC(SUBHS, SUBHS, "subhs"),
++ NORMAL_MNEMONIC(SUBLO, SUBLO, "sublo"),
++ NORMAL_MNEMONIC(SUBFEQ, SUBFEQ, "subfeq"),
++ NORMAL_MNEMONIC(SUBFNE, SUBFNE, "subfne"),
++ NORMAL_MNEMONIC(SUBFCC, SUBFCC, "subfcc"),
++ NORMAL_MNEMONIC(SUBFCS, SUBFCS, "subfcs"),
++ NORMAL_MNEMONIC(SUBFGE, SUBFGE, "subfge"),
++ NORMAL_MNEMONIC(SUBFLT, SUBFLT, "subflt"),
++ NORMAL_MNEMONIC(SUBFMI, SUBFMI, "subfmi"),
++ NORMAL_MNEMONIC(SUBFPL, SUBFPL, "subfpl"),
++ NORMAL_MNEMONIC(SUBFLS, SUBFLS, "subfls"),
++ NORMAL_MNEMONIC(SUBFGT, SUBFGT, "subfgt"),
++ NORMAL_MNEMONIC(SUBFLE, SUBFLE, "subfle"),
++ NORMAL_MNEMONIC(SUBFHI, SUBFHI, "subfhi"),
++ NORMAL_MNEMONIC(SUBFVS, SUBFVS, "subfvs"),
++ NORMAL_MNEMONIC(SUBFVC, SUBFVC, "subfvc"),
++ NORMAL_MNEMONIC(SUBFQS, SUBFQS, "subfqs"),
++ NORMAL_MNEMONIC(SUBFAL, SUBFAL, "subfal"),
++ NORMAL_MNEMONIC(SUBFHS, SUBFHS, "subfhs"),
++ NORMAL_MNEMONIC(SUBFLO, SUBFLO, "subflo"),
++ NORMAL_MNEMONIC(SUBHH_W, SUBHH_W, "subhh.w"),
++ NORMAL_MNEMONIC(SWAP_B, SWAP_B, "swap.b"),
++ NORMAL_MNEMONIC(SWAP_BH, SWAP_BH, "swap.bh"),
++ NORMAL_MNEMONIC(SWAP_H, SWAP_H, "swap.h"),
++ NORMAL_MNEMONIC(SYNC, SYNC, "sync"),
++ NORMAL_MNEMONIC(TLBR, TLBR, "tlbr"),
++ NORMAL_MNEMONIC(TLBS, TLBS, "tlbs"),
++ NORMAL_MNEMONIC(TLBW, TLBW, "tlbw"),
++ NORMAL_MNEMONIC(TNBZ, TNBZ, "tnbz"),
++ NORMAL_MNEMONIC(TST, TST, "tst"),
++ NORMAL_MNEMONIC(XCHG, XCHG, "xchg"),
++ NORMAL_MNEMONIC(MEMC, MEMC, "memc"),
++ NORMAL_MNEMONIC(MEMS, MEMS, "mems"),
++ NORMAL_MNEMONIC(MEMT, MEMT, "memt"),
++ NORMAL_MNEMONIC (FMAC_S, FMAC_S, "fmac.s"),
++ NORMAL_MNEMONIC (FNMAC_S, FNMAC_S, "fnmac.s"),
++ NORMAL_MNEMONIC (FMSC_S, FMSC_S, "fmsc.s"),
++ NORMAL_MNEMONIC (FNMSC_S, FNMSC_S, "fnmsc.s"),
++ NORMAL_MNEMONIC (FMUL_S, FMUL_S, "fmul.s"),
++ NORMAL_MNEMONIC (FNMUL_S, FNMUL_S, "fnmul.s"),
++ NORMAL_MNEMONIC (FADD_S, FADD_S, "fadd.s"),
++ NORMAL_MNEMONIC (FSUB_S, FSUB_S, "fsub.s"),
++ NORMAL_MNEMONIC (FCASTRS_SW, FCASTRS_SW, "fcastrs.sw"),
++ NORMAL_MNEMONIC (FCASTRS_UW, FCASTRS_UW, "fcastrs.uw"),
++ NORMAL_MNEMONIC (FCASTSW_S, FCASTSW_S, "fcastsw.s"),
++ NORMAL_MNEMONIC (FCASTUW_S, FCASTUW_S, "fcastuw.s"),
++ NORMAL_MNEMONIC (FCMP_S, FCMP_S, "fcmp.s"),
++ NORMAL_MNEMONIC (FCHK_S, FCHK_S, "fchk.s"),
++ NORMAL_MNEMONIC (FRCPA_S, FRCPA_S, "frcpa.s"),
++ NORMAL_MNEMONIC (FRSQRTA_S, FRSQRTA_S, "frsqrta.s"),
++ NORMAL_MNEMONIC(LDA_W, LDA_W, "lda.w"),
++ NORMAL_MNEMONIC(CALL, CALL, "call"),
++ NORMAL_MNEMONIC(PICOSVMAC, PICOSVMAC0, "picosvmac"),
++ NORMAL_MNEMONIC(PICOSVMUL, PICOSVMUL0, "picosvmul"),
++ NORMAL_MNEMONIC(PICOVMAC, PICOVMAC0, "picovmac"),
++ NORMAL_MNEMONIC(PICOVMUL, PICOVMUL0, "picovmul"),
++ NORMAL_MNEMONIC(PICOLD_D, PICOLD_D2, "picold.d"),
++ NORMAL_MNEMONIC(PICOLD_W, PICOLD_W2, "picold.w"),
++ NORMAL_MNEMONIC(PICOLDM_D, PICOLDM_D, "picoldm.d"),
++ NORMAL_MNEMONIC(PICOLDM_W, PICOLDM_W, "picoldm.w"),
++ NORMAL_MNEMONIC(PICOMV_D, PICOMV_D1, "picomv.d"),
++ NORMAL_MNEMONIC(PICOMV_W, PICOMV_W1, "picomv.w"),
++ NORMAL_MNEMONIC(PICOST_D, PICOST_D2, "picost.d"),
++ NORMAL_MNEMONIC(PICOST_W, PICOST_W2, "picost.w"),
++ NORMAL_MNEMONIC(PICOSTM_D, PICOSTM_D, "picostm.d"),
++ NORMAL_MNEMONIC(PICOSTM_W, PICOSTM_W, "picostm.w"),
++ NORMAL_MNEMONIC(RSUBEQ, RSUBEQ, "rsubeq"),
++ NORMAL_MNEMONIC(RSUBNE, RSUBNE, "rsubne"),
++ NORMAL_MNEMONIC(RSUBCC, RSUBCC, "rsubcc"),
++ NORMAL_MNEMONIC(RSUBCS, RSUBCS, "rsubcs"),
++ NORMAL_MNEMONIC(RSUBGE, RSUBGE, "rsubge"),
++ NORMAL_MNEMONIC(RSUBLT, RSUBLT, "rsublt"),
++ NORMAL_MNEMONIC(RSUBMI, RSUBMI, "rsubmi"),
++ NORMAL_MNEMONIC(RSUBPL, RSUBPL, "rsubpl"),
++ NORMAL_MNEMONIC(RSUBLS, RSUBLS, "rsubls"),
++ NORMAL_MNEMONIC(RSUBGT, RSUBGT, "rsubgt"),
++ NORMAL_MNEMONIC(RSUBLE, RSUBLE, "rsuble"),
++ NORMAL_MNEMONIC(RSUBHI, RSUBHI, "rsubhi"),
++ NORMAL_MNEMONIC(RSUBVS, RSUBVS, "rsubvs"),
++ NORMAL_MNEMONIC(RSUBVC, RSUBVC, "rsubvc"),
++ NORMAL_MNEMONIC(RSUBQS, RSUBQS, "rsubqs"),
++ NORMAL_MNEMONIC(RSUBAL, RSUBAL, "rsubal"),
++ NORMAL_MNEMONIC(RSUBHS, RSUBHS, "rsubhs"),
++ NORMAL_MNEMONIC(RSUBLO, RSUBLO, "rsublo"),
++ NORMAL_MNEMONIC(ADDEQ, ADDEQ, "addeq"),
++ NORMAL_MNEMONIC(ADDNE, ADDNE, "addne"),
++ NORMAL_MNEMONIC(ADDCC, ADDCC, "addcc"),
++ NORMAL_MNEMONIC(ADDCS, ADDCS, "addcs"),
++ NORMAL_MNEMONIC(ADDGE, ADDGE, "addge"),
++ NORMAL_MNEMONIC(ADDLT, ADDLT, "addlt"),
++ NORMAL_MNEMONIC(ADDMI, ADDMI, "addmi"),
++ NORMAL_MNEMONIC(ADDPL, ADDPL, "addpl"),
++ NORMAL_MNEMONIC(ADDLS, ADDLS, "addls"),
++ NORMAL_MNEMONIC(ADDGT, ADDGT, "addgt"),
++ NORMAL_MNEMONIC(ADDLE, ADDLE, "addle"),
++ NORMAL_MNEMONIC(ADDHI, ADDHI, "addhi"),
++ NORMAL_MNEMONIC(ADDVS, ADDVS, "addvs"),
++ NORMAL_MNEMONIC(ADDVC, ADDVC, "addvc"),
++ NORMAL_MNEMONIC(ADDQS, ADDQS, "addqs"),
++ NORMAL_MNEMONIC(ADDAL, ADDAL, "addal"),
++ NORMAL_MNEMONIC(ADDHS, ADDHS, "addhs"),
++ NORMAL_MNEMONIC(ADDLO, ADDLO, "addlo"),
++ NORMAL_MNEMONIC(ANDEQ, ANDEQ, "andeq"),
++ NORMAL_MNEMONIC(ANDNE, ANDNE, "andne"),
++ NORMAL_MNEMONIC(ANDCC, ANDCC, "andcc"),
++ NORMAL_MNEMONIC(ANDCS, ANDCS, "andcs"),
++ NORMAL_MNEMONIC(ANDGE, ANDGE, "andge"),
++ NORMAL_MNEMONIC(ANDLT, ANDLT, "andlt"),
++ NORMAL_MNEMONIC(ANDMI, ANDMI, "andmi"),
++ NORMAL_MNEMONIC(ANDPL, ANDPL, "andpl"),
++ NORMAL_MNEMONIC(ANDLS, ANDLS, "andls"),
++ NORMAL_MNEMONIC(ANDGT, ANDGT, "andgt"),
++ NORMAL_MNEMONIC(ANDLE, ANDLE, "andle"),
++ NORMAL_MNEMONIC(ANDHI, ANDHI, "andhi"),
++ NORMAL_MNEMONIC(ANDVS, ANDVS, "andvs"),
++ NORMAL_MNEMONIC(ANDVC, ANDVC, "andvc"),
++ NORMAL_MNEMONIC(ANDQS, ANDQS, "andqs"),
++ NORMAL_MNEMONIC(ANDAL, ANDAL, "andal"),
++ NORMAL_MNEMONIC(ANDHS, ANDHS, "andhs"),
++ NORMAL_MNEMONIC(ANDLO, ANDLO, "andlo"),
++ NORMAL_MNEMONIC(OREQ, OREQ, "oreq"),
++ NORMAL_MNEMONIC(ORNE, ORNE, "orne"),
++ NORMAL_MNEMONIC(ORCC, ORCC, "orcc"),
++ NORMAL_MNEMONIC(ORCS, ORCS, "orcs"),
++ NORMAL_MNEMONIC(ORGE, ORGE, "orge"),
++ NORMAL_MNEMONIC(ORLT, ORLT, "orlt"),
++ NORMAL_MNEMONIC(ORMI, ORMI, "ormi"),
++ NORMAL_MNEMONIC(ORPL, ORPL, "orpl"),
++ NORMAL_MNEMONIC(ORLS, ORLS, "orls"),
++ NORMAL_MNEMONIC(ORGT, ORGT, "orgt"),
++ NORMAL_MNEMONIC(ORLE, ORLE, "orle"),
++ NORMAL_MNEMONIC(ORHI, ORHI, "orhi"),
++ NORMAL_MNEMONIC(ORVS, ORVS, "orvs"),
++ NORMAL_MNEMONIC(ORVC, ORVC, "orvc"),
++ NORMAL_MNEMONIC(ORQS, ORQS, "orqs"),
++ NORMAL_MNEMONIC(ORAL, ORAL, "oral"),
++ NORMAL_MNEMONIC(ORHS, ORHS, "orhs"),
++ NORMAL_MNEMONIC(ORLO, ORLO, "orlo"),
++ NORMAL_MNEMONIC(EOREQ, EOREQ, "eoreq"),
++ NORMAL_MNEMONIC(EORNE, EORNE, "eorne"),
++ NORMAL_MNEMONIC(EORCC, EORCC, "eorcc"),
++ NORMAL_MNEMONIC(EORCS, EORCS, "eorcs"),
++ NORMAL_MNEMONIC(EORGE, EORGE, "eorge"),
++ NORMAL_MNEMONIC(EORLT, EORLT, "eorlt"),
++ NORMAL_MNEMONIC(EORMI, EORMI, "eormi"),
++ NORMAL_MNEMONIC(EORPL, EORPL, "eorpl"),
++ NORMAL_MNEMONIC(EORLS, EORLS, "eorls"),
++ NORMAL_MNEMONIC(EORGT, EORGT, "eorgt"),
++ NORMAL_MNEMONIC(EORLE, EORLE, "eorle"),
++ NORMAL_MNEMONIC(EORHI, EORHI, "eorhi"),
++ NORMAL_MNEMONIC(EORVS, EORVS, "eorvs"),
++ NORMAL_MNEMONIC(EORVC, EORVC, "eorvc"),
++ NORMAL_MNEMONIC(EORQS, EORQS, "eorqs"),
++ NORMAL_MNEMONIC(EORAL, EORAL, "eoral"),
++ NORMAL_MNEMONIC(EORHS, EORHS, "eorhs"),
++ NORMAL_MNEMONIC(EORLO, EORLO, "eorlo"),
++ NORMAL_MNEMONIC(LD_WEQ, LD_WEQ, "ld.weq"),
++ NORMAL_MNEMONIC(LD_WNE, LD_WNE, "ld.wne"),
++ NORMAL_MNEMONIC(LD_WCC, LD_WCC, "ld.wcc"),
++ NORMAL_MNEMONIC(LD_WCS, LD_WCS, "ld.wcs"),
++ NORMAL_MNEMONIC(LD_WGE, LD_WGE, "ld.wge"),
++ NORMAL_MNEMONIC(LD_WLT, LD_WLT, "ld.wlt"),
++ NORMAL_MNEMONIC(LD_WMI, LD_WMI, "ld.wmi"),
++ NORMAL_MNEMONIC(LD_WPL, LD_WPL, "ld.wpl"),
++ NORMAL_MNEMONIC(LD_WLS, LD_WLS, "ld.wls"),
++ NORMAL_MNEMONIC(LD_WGT, LD_WGT, "ld.wgt"),
++ NORMAL_MNEMONIC(LD_WLE, LD_WLE, "ld.wle"),
++ NORMAL_MNEMONIC(LD_WHI, LD_WHI, "ld.whi"),
++ NORMAL_MNEMONIC(LD_WVS, LD_WVS, "ld.wvs"),
++ NORMAL_MNEMONIC(LD_WVC, LD_WVC, "ld.wvc"),
++ NORMAL_MNEMONIC(LD_WQS, LD_WQS, "ld.wqs"),
++ NORMAL_MNEMONIC(LD_WAL, LD_WAL, "ld.wal"),
++ NORMAL_MNEMONIC(LD_WHS, LD_WHS, "ld.whs"),
++ NORMAL_MNEMONIC(LD_WLO, LD_WLO, "ld.wlo"),
++ NORMAL_MNEMONIC(LD_SHEQ, LD_SHEQ, "ld.sheq"),
++ NORMAL_MNEMONIC(LD_SHNE, LD_SHNE, "ld.shne"),
++ NORMAL_MNEMONIC(LD_SHCC, LD_SHCC, "ld.shcc"),
++ NORMAL_MNEMONIC(LD_SHCS, LD_SHCS, "ld.shcs"),
++ NORMAL_MNEMONIC(LD_SHGE, LD_SHGE, "ld.shge"),
++ NORMAL_MNEMONIC(LD_SHLT, LD_SHLT, "ld.shlt"),
++ NORMAL_MNEMONIC(LD_SHMI, LD_SHMI, "ld.shmi"),
++ NORMAL_MNEMONIC(LD_SHPL, LD_SHPL, "ld.shpl"),
++ NORMAL_MNEMONIC(LD_SHLS, LD_SHLS, "ld.shls"),
++ NORMAL_MNEMONIC(LD_SHGT, LD_SHGT, "ld.shgt"),
++ NORMAL_MNEMONIC(LD_SHLE, LD_SHLE, "ld.shle"),
++ NORMAL_MNEMONIC(LD_SHHI, LD_SHHI, "ld.shhi"),
++ NORMAL_MNEMONIC(LD_SHVS, LD_SHVS, "ld.shvs"),
++ NORMAL_MNEMONIC(LD_SHVC, LD_SHVC, "ld.shvc"),
++ NORMAL_MNEMONIC(LD_SHQS, LD_SHQS, "ld.shqs"),
++ NORMAL_MNEMONIC(LD_SHAL, LD_SHAL, "ld.shal"),
++ NORMAL_MNEMONIC(LD_SHHS, LD_SHHS, "ld.shhs"),
++ NORMAL_MNEMONIC(LD_SHLO, LD_SHLO, "ld.shlo"),
++ NORMAL_MNEMONIC(LD_UHEQ, LD_UHEQ, "ld.uheq"),
++ NORMAL_MNEMONIC(LD_UHNE, LD_UHNE, "ld.uhne"),
++ NORMAL_MNEMONIC(LD_UHCC, LD_UHCC, "ld.uhcc"),
++ NORMAL_MNEMONIC(LD_UHCS, LD_UHCS, "ld.uhcs"),
++ NORMAL_MNEMONIC(LD_UHGE, LD_UHGE, "ld.uhge"),
++ NORMAL_MNEMONIC(LD_UHLT, LD_UHLT, "ld.uhlt"),
++ NORMAL_MNEMONIC(LD_UHMI, LD_UHMI, "ld.uhmi"),
++ NORMAL_MNEMONIC(LD_UHPL, LD_UHPL, "ld.uhpl"),
++ NORMAL_MNEMONIC(LD_UHLS, LD_UHLS, "ld.uhls"),
++ NORMAL_MNEMONIC(LD_UHGT, LD_UHGT, "ld.uhgt"),
++ NORMAL_MNEMONIC(LD_UHLE, LD_UHLE, "ld.uhle"),
++ NORMAL_MNEMONIC(LD_UHHI, LD_UHHI, "ld.uhhi"),
++ NORMAL_MNEMONIC(LD_UHVS, LD_UHVS, "ld.uhvs"),
++ NORMAL_MNEMONIC(LD_UHVC, LD_UHVC, "ld.uhvc"),
++ NORMAL_MNEMONIC(LD_UHQS, LD_UHQS, "ld.uhqs"),
++ NORMAL_MNEMONIC(LD_UHAL, LD_UHAL, "ld.uhal"),
++ NORMAL_MNEMONIC(LD_UHHS, LD_UHHS, "ld.uhhs"),
++ NORMAL_MNEMONIC(LD_UHLO, LD_UHLO, "ld.uhlo"),
++ NORMAL_MNEMONIC(LD_SBEQ, LD_SBEQ, "ld.sbeq"),
++ NORMAL_MNEMONIC(LD_SBNE, LD_SBNE, "ld.sbne"),
++ NORMAL_MNEMONIC(LD_SBCC, LD_SBCC, "ld.sbcc"),
++ NORMAL_MNEMONIC(LD_SBCS, LD_SBCS, "ld.sbcs"),
++ NORMAL_MNEMONIC(LD_SBGE, LD_SBGE, "ld.sbge"),
++ NORMAL_MNEMONIC(LD_SBLT, LD_SBLT, "ld.sblt"),
++ NORMAL_MNEMONIC(LD_SBMI, LD_SBMI, "ld.sbmi"),
++ NORMAL_MNEMONIC(LD_SBPL, LD_SBPL, "ld.sbpl"),
++ NORMAL_MNEMONIC(LD_SBLS, LD_SBLS, "ld.sbls"),
++ NORMAL_MNEMONIC(LD_SBGT, LD_SBGT, "ld.sbgt"),
++ NORMAL_MNEMONIC(LD_SBLE, LD_SBLE, "ld.sble"),
++ NORMAL_MNEMONIC(LD_SBHI, LD_SBHI, "ld.sbhi"),
++ NORMAL_MNEMONIC(LD_SBVS, LD_SBVS, "ld.sbvs"),
++ NORMAL_MNEMONIC(LD_SBVC, LD_SBVC, "ld.sbvc"),
++ NORMAL_MNEMONIC(LD_SBQS, LD_SBQS, "ld.sbqs"),
++ NORMAL_MNEMONIC(LD_SBAL, LD_SBAL, "ld.sbal"),
++ NORMAL_MNEMONIC(LD_SBHS, LD_SBHS, "ld.sbhs"),
++ NORMAL_MNEMONIC(LD_SBLO, LD_SBLO, "ld.sblo"),
++ NORMAL_MNEMONIC(LD_UBEQ, LD_UBEQ, "ld.ubeq"),
++ NORMAL_MNEMONIC(LD_UBNE, LD_UBNE, "ld.ubne"),
++ NORMAL_MNEMONIC(LD_UBCC, LD_UBCC, "ld.ubcc"),
++ NORMAL_MNEMONIC(LD_UBCS, LD_UBCS, "ld.ubcs"),
++ NORMAL_MNEMONIC(LD_UBGE, LD_UBGE, "ld.ubge"),
++ NORMAL_MNEMONIC(LD_UBLT, LD_UBLT, "ld.ublt"),
++ NORMAL_MNEMONIC(LD_UBMI, LD_UBMI, "ld.ubmi"),
++ NORMAL_MNEMONIC(LD_UBPL, LD_UBPL, "ld.ubpl"),
++ NORMAL_MNEMONIC(LD_UBLS, LD_UBLS, "ld.ubls"),
++ NORMAL_MNEMONIC(LD_UBGT, LD_UBGT, "ld.ubgt"),
++ NORMAL_MNEMONIC(LD_UBLE, LD_UBLE, "ld.uble"),
++ NORMAL_MNEMONIC(LD_UBHI, LD_UBHI, "ld.ubhi"),
++ NORMAL_MNEMONIC(LD_UBVS, LD_UBVS, "ld.ubvs"),
++ NORMAL_MNEMONIC(LD_UBVC, LD_UBVC, "ld.ubvc"),
++ NORMAL_MNEMONIC(LD_UBQS, LD_UBQS, "ld.ubqs"),
++ NORMAL_MNEMONIC(LD_UBAL, LD_UBAL, "ld.ubal"),
++ NORMAL_MNEMONIC(LD_UBHS, LD_UBHS, "ld.ubhs"),
++ NORMAL_MNEMONIC(LD_UBLO, LD_UBLO, "ld.ublo"),
++ NORMAL_MNEMONIC(ST_WEQ, ST_WEQ, "st.weq"),
++ NORMAL_MNEMONIC(ST_WNE, ST_WNE, "st.wne"),
++ NORMAL_MNEMONIC(ST_WCC, ST_WCC, "st.wcc"),
++ NORMAL_MNEMONIC(ST_WCS, ST_WCS, "st.wcs"),
++ NORMAL_MNEMONIC(ST_WGE, ST_WGE, "st.wge"),
++ NORMAL_MNEMONIC(ST_WLT, ST_WLT, "st.wlt"),
++ NORMAL_MNEMONIC(ST_WMI, ST_WMI, "st.wmi"),
++ NORMAL_MNEMONIC(ST_WPL, ST_WPL, "st.wpl"),
++ NORMAL_MNEMONIC(ST_WLS, ST_WLS, "st.wls"),
++ NORMAL_MNEMONIC(ST_WGT, ST_WGT, "st.wgt"),
++ NORMAL_MNEMONIC(ST_WLE, ST_WLE, "st.wle"),
++ NORMAL_MNEMONIC(ST_WHI, ST_WHI, "st.whi"),
++ NORMAL_MNEMONIC(ST_WVS, ST_WVS, "st.wvs"),
++ NORMAL_MNEMONIC(ST_WVC, ST_WVC, "st.wvc"),
++ NORMAL_MNEMONIC(ST_WQS, ST_WQS, "st.wqs"),
++ NORMAL_MNEMONIC(ST_WAL, ST_WAL, "st.wal"),
++ NORMAL_MNEMONIC(ST_WHS, ST_WHS, "st.whs"),
++ NORMAL_MNEMONIC(ST_WLO, ST_WLO, "st.wlo"),
++ NORMAL_MNEMONIC(ST_HEQ, ST_HEQ, "st.heq"),
++ NORMAL_MNEMONIC(ST_HNE, ST_HNE, "st.hne"),
++ NORMAL_MNEMONIC(ST_HCC, ST_HCC, "st.hcc"),
++ NORMAL_MNEMONIC(ST_HCS, ST_HCS, "st.hcs"),
++ NORMAL_MNEMONIC(ST_HGE, ST_HGE, "st.hge"),
++ NORMAL_MNEMONIC(ST_HLT, ST_HLT, "st.hlt"),
++ NORMAL_MNEMONIC(ST_HMI, ST_HMI, "st.hmi"),
++ NORMAL_MNEMONIC(ST_HPL, ST_HPL, "st.hpl"),
++ NORMAL_MNEMONIC(ST_HLS, ST_HLS, "st.hls"),
++ NORMAL_MNEMONIC(ST_HGT, ST_HGT, "st.hgt"),
++ NORMAL_MNEMONIC(ST_HLE, ST_HLE, "st.hle"),
++ NORMAL_MNEMONIC(ST_HHI, ST_HHI, "st.hhi"),
++ NORMAL_MNEMONIC(ST_HVS, ST_HVS, "st.hvs"),
++ NORMAL_MNEMONIC(ST_HVC, ST_HVC, "st.hvc"),
++ NORMAL_MNEMONIC(ST_HQS, ST_HQS, "st.hqs"),
++ NORMAL_MNEMONIC(ST_HAL, ST_HAL, "st.hal"),
++ NORMAL_MNEMONIC(ST_HHS, ST_HHS, "st.hhs"),
++ NORMAL_MNEMONIC(ST_HLO, ST_HLO, "st.hlo"),
++ NORMAL_MNEMONIC(ST_BEQ, ST_BEQ, "st.beq"),
++ NORMAL_MNEMONIC(ST_BNE, ST_BNE, "st.bne"),
++ NORMAL_MNEMONIC(ST_BCC, ST_BCC, "st.bcc"),
++ NORMAL_MNEMONIC(ST_BCS, ST_BCS, "st.bcs"),
++ NORMAL_MNEMONIC(ST_BGE, ST_BGE, "st.bge"),
++ NORMAL_MNEMONIC(ST_BLT, ST_BLT, "st.blt"),
++ NORMAL_MNEMONIC(ST_BMI, ST_BMI, "st.bmi"),
++ NORMAL_MNEMONIC(ST_BPL, ST_BPL, "st.bpl"),
++ NORMAL_MNEMONIC(ST_BLS, ST_BLS, "st.bls"),
++ NORMAL_MNEMONIC(ST_BGT, ST_BGT, "st.bgt"),
++ NORMAL_MNEMONIC(ST_BLE, ST_BLE, "st.ble"),
++ NORMAL_MNEMONIC(ST_BHI, ST_BHI, "st.bhi"),
++ NORMAL_MNEMONIC(ST_BVS, ST_BVS, "st.bvs"),
++ NORMAL_MNEMONIC(ST_BVC, ST_BVC, "st.bvc"),
++ NORMAL_MNEMONIC(ST_BQS, ST_BQS, "st.bqs"),
++ NORMAL_MNEMONIC(ST_BAL, ST_BAL, "st.bal"),
++ NORMAL_MNEMONIC(ST_BHS, ST_BHS, "st.bhs"),
++ NORMAL_MNEMONIC(ST_BLO, ST_BLO, "st.blo"),
++ NORMAL_MNEMONIC(MOVH, MOVH, "movh"),
++
++ };
++#undef NORMAL_MNEMONIC
++#undef ALIAS_MNEMONIC
++#undef FP_MNEMONIC
+--- /dev/null
++++ b/opcodes/avr32-opc.h
+@@ -0,0 +1,2341 @@
++/* Opcode tables for AVR32.
++ Copyright 2005,2006,2007,2008,2009 Atmel Corporation.
++
++ Written by Haavard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of libopcodes.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ 02111-1307, USA. */
++
++#include "bfd.h"
++
++#define AVR32_MAX_OPERANDS 8
++#define AVR32_MAX_FIELDS 8
++
++#define AVR32_V1 (1 << 1)
++#define AVR32_SIMD (1 << 2)
++#define AVR32_DSP (1 << 3)
++#define AVR32_RMW (1 << 4)
++#define AVR32_V2 (1 << 5)
++#define AVR32_V3 (1 << 6)
++#define AVR32_V3FP (1 << 7)
++#define AVR32_PICO (1 << 17)
++
++/* Registers we commonly refer to */
++#define AVR32_REG_R12 12
++#define AVR32_REG_SP 13
++#define AVR32_REG_LR 14
++#define AVR32_REG_PC 15
++
++struct avr32_ifield
++{
++ int id;
++ unsigned short bitsize;
++ unsigned short shift;
++ unsigned long mask;
++
++ /* If the value doesn't fit, it will be truncated with no warning */
++ void (*insert)(const struct avr32_ifield *, void *, unsigned long);
++ void (*extract)(const struct avr32_ifield *, void *, unsigned long *);
++};
++
++struct avr32_opcode
++{
++ int id;
++ int size;
++ unsigned long value;
++ unsigned long mask;
++ const struct avr32_syntax *syntax;
++ bfd_reloc_code_real_type reloc_type;
++ unsigned int nr_fields;
++ /* if relaxable, which field is variable, otherwise -1 */
++ int var_field;
++ const struct avr32_ifield *fields[AVR32_MAX_FIELDS];
++};
++
++struct avr32_alias
++{
++ int id;
++ const struct avr32_opcode *opc;
++ struct {
++ int is_opindex;
++ unsigned long value;
++ } operand_map[AVR32_MAX_OPERANDS];
++};
++
++struct avr32_syntax
++{
++ int id;
++ unsigned long isa_flags;
++ const struct avr32_mnemonic *mnemonic;
++ int type;
++ union {
++ const struct avr32_opcode *opc;
++ const struct avr32_alias *alias;
++ } u;
++ const struct avr32_syntax *next;
++ /* negative means "vararg" */
++ int nr_operands;
++ int operand[AVR32_MAX_OPERANDS];
++};
++
++#if 0
++#define AVR32_ALIAS_MAKE_CONST(val) ((val) | 0x80000000UL)
++#define AVR32_ALIAS_IS_CONST(mapval) (((mapval) & 0x80000000UL) != 0)
++#define AVR32_ALIAS_GET_CONST(mapval) ((mapval) & ~0x80000000UL)
++#endif
++
++struct avr32_mnemonic
++{
++ int id;
++ const char *name;
++ const struct avr32_syntax *syntax;
++};
++
++extern const struct avr32_ifield avr32_ifield_table[];
++extern struct avr32_opcode avr32_opc_table[];
++extern const struct avr32_syntax avr32_syntax_table[];
++extern const struct avr32_alias avr32_alias_table[];
++extern const struct avr32_mnemonic avr32_mnemonic_table[];
++
++extern void avr32_insert_simple(const struct avr32_ifield *field,
++ void *buf, unsigned long value);
++extern void avr32_insert_bit5c(const struct avr32_ifield *field,
++ void *buf, unsigned long value);
++extern void avr32_insert_k10(const struct avr32_ifield *field,
++ void *buf, unsigned long value);
++extern void avr32_insert_k21(const struct avr32_ifield *field,
++ void *buf, unsigned long value);
++extern void avr32_insert_cpop(const struct avr32_ifield *field,
++ void *buf, unsigned long value);
++extern void avr32_insert_k12cp(const struct avr32_ifield *field,
++ void *buf, unsigned long value);
++
++extern void avr32_extract_simple(const struct avr32_ifield *field,
++ void *buf, unsigned long *value);
++extern void avr32_extract_bit5c(const struct avr32_ifield *field,
++ void *buf, unsigned long *value);
++extern void avr32_extract_k10(const struct avr32_ifield *field,
++ void *buf, unsigned long *value);
++extern void avr32_extract_k21(const struct avr32_ifield *field,
++ void *buf, unsigned long *value);
++extern void avr32_extract_cpop(const struct avr32_ifield *field,
++ void *buf, unsigned long *value);
++extern void avr32_extract_k12cp(const struct avr32_ifield *field,
++ void *buf, unsigned long *value);
++
++enum avr32_operand_type
++{
++ AVR32_OPERAND_INTREG, /* just a register */
++ AVR32_OPERAND_INTREG_PREDEC, /* register with pre-decrement */
++ AVR32_OPERAND_INTREG_POSTINC, /* register with post-increment */
++ AVR32_OPERAND_INTREG_LSL, /* register with left shift */
++ AVR32_OPERAND_INTREG_LSR, /* register with right shift */
++ AVR32_OPERAND_INTREG_BSEL, /* register with byte selector */
++ AVR32_OPERAND_INTREG_HSEL, /* register with halfword selector */
++ AVR32_OPERAND_INTREG_SDISP, /* Rp[signed disp] */
++ AVR32_OPERAND_INTREG_SDISP_H, /* Rp[signed hword-aligned disp] */
++ AVR32_OPERAND_INTREG_SDISP_W, /* Rp[signed word-aligned disp] */
++ AVR32_OPERAND_INTREG_UDISP, /* Rp[unsigned disp] */
++ AVR32_OPERAND_INTREG_UDISP_H, /* Rp[unsigned hword-aligned disp] */
++ AVR32_OPERAND_INTREG_UDISP_W, /* Rp[unsigned word-aligned disp] */
++ AVR32_OPERAND_INTREG_INDEX, /* Rp[Ri << sa] */
++ AVR32_OPERAND_INTREG_XINDEX, /* Rp[Ri:bytesel << 2] */
++ AVR32_OPERAND_DWREG, /* Even-numbered register */
++ AVR32_OPERAND_PC_UDISP_W, /* PC[unsigned word-aligned disp] or label */
++ AVR32_OPERAND_SP, /* Just SP */
++ AVR32_OPERAND_SP_UDISP_W, /* SP[unsigned word-aligned disp] */
++ AVR32_OPERAND_CPNO,
++ AVR32_OPERAND_CPREG,
++ AVR32_OPERAND_CPREG_D,
++ AVR32_OPERAND_UNSIGNED_CONST,
++ AVR32_OPERAND_UNSIGNED_CONST_W,
++ AVR32_OPERAND_SIGNED_CONST,
++ AVR32_OPERAND_SIGNED_CONST_W,
++ AVR32_OPERAND_JMPLABEL,
++ AVR32_OPERAND_UNSIGNED_NUMBER,
++ AVR32_OPERAND_UNSIGNED_NUMBER_W,
++ AVR32_OPERAND_REGLIST8,
++ AVR32_OPERAND_REGLIST9,
++ AVR32_OPERAND_REGLIST16,
++ AVR32_OPERAND_REGLIST_LDM,
++ AVR32_OPERAND_REGLIST_CP8,
++ AVR32_OPERAND_REGLIST_CPD8,
++ AVR32_OPERAND_RETVAL,
++ AVR32_OPERAND_MCALL,
++ AVR32_OPERAND_JOSPINC,
++ AVR32_OPERAND_COH,
++ AVR32_OPERAND_PICO_REG_W,
++ AVR32_OPERAND_PICO_REG_D,
++ AVR32_OPERAND_PICO_REGLIST_W,
++ AVR32_OPERAND_PICO_REGLIST_D,
++ AVR32_OPERAND_PICO_IN,
++ AVR32_OPERAND_PICO_OUT0,
++ AVR32_OPERAND_PICO_OUT1,
++ AVR32_OPERAND_PICO_OUT2,
++ AVR32_OPERAND_PICO_OUT3,
++ AVR32_OPERAND__END_
++};
++#define AVR32_OPERAND_UNKNOWN AVR32_OPERAND__END_
++#define AVR32_NR_OPERANDS AVR32_OPERAND__END_
++
++enum avr32_ifield_type
++{
++ AVR32_IFIELD_RX,
++ AVR32_IFIELD_RY,
++ AVR32_IFIELD_COND4C,
++ AVR32_IFIELD_K8C,
++ AVR32_IFIELD_K7C,
++ AVR32_IFIELD_K5C,
++ AVR32_IFIELD_K3,
++ AVR32_IFIELD_RY_DW,
++ AVR32_IFIELD_COND4E,
++ AVR32_IFIELD_K8E,
++ AVR32_IFIELD_BIT5C,
++ AVR32_IFIELD_COND3,
++ AVR32_IFIELD_K10,
++ AVR32_IFIELD_POPM,
++ AVR32_IFIELD_K2,
++ AVR32_IFIELD_RD_E,
++ AVR32_IFIELD_RD_DW,
++ AVR32_IFIELD_X,
++ AVR32_IFIELD_Y,
++ AVR32_IFIELD_X2,
++ AVR32_IFIELD_Y2,
++ AVR32_IFIELD_K5E,
++ AVR32_IFIELD_PART2,
++ AVR32_IFIELD_PART1,
++ AVR32_IFIELD_K16,
++ AVR32_IFIELD_CACHEOP,
++ AVR32_IFIELD_K11,
++ AVR32_IFIELD_K21,
++ AVR32_IFIELD_CPOP,
++ AVR32_IFIELD_CPNO,
++ AVR32_IFIELD_CRD_RI,
++ AVR32_IFIELD_CRX,
++ AVR32_IFIELD_CRY,
++ AVR32_IFIELD_K7E,
++ AVR32_IFIELD_CRD_DW,
++ AVR32_IFIELD_PART1_K12,
++ AVR32_IFIELD_PART2_K12,
++ AVR32_IFIELD_K12,
++ AVR32_IFIELD_S5,
++ AVR32_IFIELD_K5E2,
++ AVR32_IFIELD_K4,
++ AVR32_IFIELD_COND4E2,
++ AVR32_IFIELD_K8E2,
++ AVR32_IFIELD_K6,
++ AVR32_IFIELD_MEM15,
++ AVR32_IFIELD_MEMB5,
++ AVR32_IFIELD_W,
++ AVR32_IFIELD_CM_HL,
++ AVR32_IFIELD_K12CP,
++ AVR32_IFIELD_K9E,
++ AVR32_IFIELD_FP_RX,
++ AVR32_IFIELD_FP_RY,
++ AVR32_IFIELD_FP_RD,
++ AVR32_IFIELD_FP_RA,
++ AVR32_IFIELD__END_,
++};
++#define AVR32_NR_IFIELDS AVR32_IFIELD__END_
++
++enum avr32_opc_type
++{
++ AVR32_OPC_ABS,
++ AVR32_OPC_ACALL,
++ AVR32_OPC_ACR,
++ AVR32_OPC_ADC,
++ AVR32_OPC_ADD1,
++ AVR32_OPC_ADD2,
++ AVR32_OPC_ADDABS,
++ AVR32_OPC_ADDHH_W,
++ AVR32_OPC_AND1,
++ AVR32_OPC_AND2,
++ AVR32_OPC_AND3,
++ AVR32_OPC_ANDH,
++ AVR32_OPC_ANDH_COH,
++ AVR32_OPC_ANDL,
++ AVR32_OPC_ANDL_COH,
++ AVR32_OPC_ANDN,
++ AVR32_OPC_ASR1,
++ AVR32_OPC_ASR3,
++ AVR32_OPC_ASR2,
++ AVR32_OPC_BLD,
++ AVR32_OPC_BREQ1,
++ AVR32_OPC_BRNE1,
++ AVR32_OPC_BRCC1,
++ AVR32_OPC_BRCS1,
++ AVR32_OPC_BRGE1,
++ AVR32_OPC_BRLT1,
++ AVR32_OPC_BRMI1,
++ AVR32_OPC_BRPL1,
++ AVR32_OPC_BREQ2,
++ AVR32_OPC_BRNE2,
++ AVR32_OPC_BRCC2,
++ AVR32_OPC_BRCS2,
++ AVR32_OPC_BRGE2,
++ AVR32_OPC_BRLT2,
++ AVR32_OPC_BRMI2,
++ AVR32_OPC_BRPL2,
++ AVR32_OPC_BRLS,
++ AVR32_OPC_BRGT,
++ AVR32_OPC_BRLE,
++ AVR32_OPC_BRHI,
++ AVR32_OPC_BRVS,
++ AVR32_OPC_BRVC,
++ AVR32_OPC_BRQS,
++ AVR32_OPC_BRAL,
++ AVR32_OPC_BREAKPOINT,
++ AVR32_OPC_BREV,
++ AVR32_OPC_BST,
++ AVR32_OPC_CACHE,
++ AVR32_OPC_CASTS_B,
++ AVR32_OPC_CASTS_H,
++ AVR32_OPC_CASTU_B,
++ AVR32_OPC_CASTU_H,
++ AVR32_OPC_CBR,
++ AVR32_OPC_CLZ,
++ AVR32_OPC_COM,
++ AVR32_OPC_COP,
++ AVR32_OPC_CP_B,
++ AVR32_OPC_CP_H,
++ AVR32_OPC_CP_W1,
++ AVR32_OPC_CP_W2,
++ AVR32_OPC_CP_W3,
++ AVR32_OPC_CPC1,
++ AVR32_OPC_CPC2,
++ AVR32_OPC_CSRF,
++ AVR32_OPC_CSRFCZ,
++ AVR32_OPC_DIVS,
++ AVR32_OPC_DIVU,
++ AVR32_OPC_EOR1,
++ AVR32_OPC_EOR2,
++ AVR32_OPC_EOR3,
++ AVR32_OPC_EORL,
++ AVR32_OPC_EORH,
++ AVR32_OPC_FRS,
++ AVR32_OPC_ICALL,
++ AVR32_OPC_INCJOSP,
++ AVR32_OPC_LD_D1,
++ AVR32_OPC_LD_D2,
++ AVR32_OPC_LD_D3,
++ AVR32_OPC_LD_D5,
++ AVR32_OPC_LD_D4,
++ AVR32_OPC_LD_SB2,
++ AVR32_OPC_LD_SB1,
++ AVR32_OPC_LD_UB1,
++ AVR32_OPC_LD_UB2,
++ AVR32_OPC_LD_UB5,
++ AVR32_OPC_LD_UB3,
++ AVR32_OPC_LD_UB4,
++ AVR32_OPC_LD_SH1,
++ AVR32_OPC_LD_SH2,
++ AVR32_OPC_LD_SH5,
++ AVR32_OPC_LD_SH3,
++ AVR32_OPC_LD_SH4,
++ AVR32_OPC_LD_UH1,
++ AVR32_OPC_LD_UH2,
++ AVR32_OPC_LD_UH5,
++ AVR32_OPC_LD_UH3,
++ AVR32_OPC_LD_UH4,
++ AVR32_OPC_LD_W1,
++ AVR32_OPC_LD_W2,
++ AVR32_OPC_LD_W5,
++ AVR32_OPC_LD_W6,
++ AVR32_OPC_LD_W3,
++ AVR32_OPC_LD_W4,
++ AVR32_OPC_LDC_D1,
++ AVR32_OPC_LDC_D2,
++ AVR32_OPC_LDC_D3,
++ AVR32_OPC_LDC_W1,
++ AVR32_OPC_LDC_W2,
++ AVR32_OPC_LDC_W3,
++ AVR32_OPC_LDC0_D,
++ AVR32_OPC_LDC0_W,
++ AVR32_OPC_LDCM_D,
++ AVR32_OPC_LDCM_D_PU,
++ AVR32_OPC_LDCM_W,
++ AVR32_OPC_LDCM_W_PU,
++ AVR32_OPC_LDDPC,
++ AVR32_OPC_LDDPC_EXT,
++ AVR32_OPC_LDDSP,
++ AVR32_OPC_LDINS_B,
++ AVR32_OPC_LDINS_H,
++ AVR32_OPC_LDM,
++ AVR32_OPC_LDMTS,
++ AVR32_OPC_LDMTS_PU,
++ AVR32_OPC_LDSWP_SH,
++ AVR32_OPC_LDSWP_UH,
++ AVR32_OPC_LDSWP_W,
++ AVR32_OPC_LSL1,
++ AVR32_OPC_LSL3,
++ AVR32_OPC_LSL2,
++ AVR32_OPC_LSR1,
++ AVR32_OPC_LSR3,
++ AVR32_OPC_LSR2,
++ AVR32_OPC_MAC,
++ AVR32_OPC_MACHH_D,
++ AVR32_OPC_MACHH_W,
++ AVR32_OPC_MACS_D,
++ AVR32_OPC_MACSATHH_W,
++ AVR32_OPC_MACUD,
++ AVR32_OPC_MACWH_D,
++ AVR32_OPC_MAX,
++ AVR32_OPC_MCALL,
++ AVR32_OPC_MFDR,
++ AVR32_OPC_MFSR,
++ AVR32_OPC_MIN,
++ AVR32_OPC_MOV3,
++ AVR32_OPC_MOV1,
++ AVR32_OPC_MOV2,
++ AVR32_OPC_MOVEQ1,
++ AVR32_OPC_MOVNE1,
++ AVR32_OPC_MOVCC1,
++ AVR32_OPC_MOVCS1,
++ AVR32_OPC_MOVGE1,
++ AVR32_OPC_MOVLT1,
++ AVR32_OPC_MOVMI1,
++ AVR32_OPC_MOVPL1,
++ AVR32_OPC_MOVLS1,
++ AVR32_OPC_MOVGT1,
++ AVR32_OPC_MOVLE1,
++ AVR32_OPC_MOVHI1,
++ AVR32_OPC_MOVVS1,
++ AVR32_OPC_MOVVC1,
++ AVR32_OPC_MOVQS1,
++ AVR32_OPC_MOVAL1,
++ AVR32_OPC_MOVEQ2,
++ AVR32_OPC_MOVNE2,
++ AVR32_OPC_MOVCC2,
++ AVR32_OPC_MOVCS2,
++ AVR32_OPC_MOVGE2,
++ AVR32_OPC_MOVLT2,
++ AVR32_OPC_MOVMI2,
++ AVR32_OPC_MOVPL2,
++ AVR32_OPC_MOVLS2,
++ AVR32_OPC_MOVGT2,
++ AVR32_OPC_MOVLE2,
++ AVR32_OPC_MOVHI2,
++ AVR32_OPC_MOVVS2,
++ AVR32_OPC_MOVVC2,
++ AVR32_OPC_MOVQS2,
++ AVR32_OPC_MOVAL2,
++ AVR32_OPC_MTDR,
++ AVR32_OPC_MTSR,
++ AVR32_OPC_MUL1,
++ AVR32_OPC_MUL2,
++ AVR32_OPC_MUL3,
++ AVR32_OPC_MULHH_W,
++ AVR32_OPC_MULNHH_W,
++ AVR32_OPC_MULNWH_D,
++ AVR32_OPC_MULSD,
++ AVR32_OPC_MULSATHH_H,
++ AVR32_OPC_MULSATHH_W,
++ AVR32_OPC_MULSATRNDHH_H,
++ AVR32_OPC_MULSATRNDWH_W,
++ AVR32_OPC_MULSATWH_W,
++ AVR32_OPC_MULU_D,
++ AVR32_OPC_MULWH_D,
++ AVR32_OPC_MUSFR,
++ AVR32_OPC_MUSTR,
++ AVR32_OPC_MVCR_D,
++ AVR32_OPC_MVCR_W,
++ AVR32_OPC_MVRC_D,
++ AVR32_OPC_MVRC_W,
++ AVR32_OPC_NEG,
++ AVR32_OPC_NOP,
++ AVR32_OPC_OR1,
++ AVR32_OPC_OR2,
++ AVR32_OPC_OR3,
++ AVR32_OPC_ORH,
++ AVR32_OPC_ORL,
++ AVR32_OPC_PABS_SB,
++ AVR32_OPC_PABS_SH,
++ AVR32_OPC_PACKSH_SB,
++ AVR32_OPC_PACKSH_UB,
++ AVR32_OPC_PACKW_SH,
++ AVR32_OPC_PADD_B,
++ AVR32_OPC_PADD_H,
++ AVR32_OPC_PADDH_SH,
++ AVR32_OPC_PADDH_UB,
++ AVR32_OPC_PADDS_SB,
++ AVR32_OPC_PADDS_SH,
++ AVR32_OPC_PADDS_UB,
++ AVR32_OPC_PADDS_UH,
++ AVR32_OPC_PADDSUB_H,
++ AVR32_OPC_PADDSUBH_SH,
++ AVR32_OPC_PADDSUBS_SH,
++ AVR32_OPC_PADDSUBS_UH,
++ AVR32_OPC_PADDX_H,
++ AVR32_OPC_PADDXH_SH,
++ AVR32_OPC_PADDXS_SH,
++ AVR32_OPC_PADDXS_UH,
++ AVR32_OPC_PASR_B,
++ AVR32_OPC_PASR_H,
++ AVR32_OPC_PAVG_SH,
++ AVR32_OPC_PAVG_UB,
++ AVR32_OPC_PLSL_B,
++ AVR32_OPC_PLSL_H,
++ AVR32_OPC_PLSR_B,
++ AVR32_OPC_PLSR_H,
++ AVR32_OPC_PMAX_SH,
++ AVR32_OPC_PMAX_UB,
++ AVR32_OPC_PMIN_SH,
++ AVR32_OPC_PMIN_UB,
++ AVR32_OPC_POPJC,
++ AVR32_OPC_POPM,
++ AVR32_OPC_POPM_E,
++ AVR32_OPC_PREF,
++ AVR32_OPC_PSAD,
++ AVR32_OPC_PSUB_B,
++ AVR32_OPC_PSUB_H,
++ AVR32_OPC_PSUBADD_H,
++ AVR32_OPC_PSUBADDH_SH,
++ AVR32_OPC_PSUBADDS_SH,
++ AVR32_OPC_PSUBADDS_UH,
++ AVR32_OPC_PSUBH_SH,
++ AVR32_OPC_PSUBH_UB,
++ AVR32_OPC_PSUBS_SB,
++ AVR32_OPC_PSUBS_SH,
++ AVR32_OPC_PSUBS_UB,
++ AVR32_OPC_PSUBS_UH,
++ AVR32_OPC_PSUBX_H,
++ AVR32_OPC_PSUBXH_SH,
++ AVR32_OPC_PSUBXS_SH,
++ AVR32_OPC_PSUBXS_UH,
++ AVR32_OPC_PUNPCKSB_H,
++ AVR32_OPC_PUNPCKUB_H,
++ AVR32_OPC_PUSHJC,
++ AVR32_OPC_PUSHM,
++ AVR32_OPC_PUSHM_E,
++ AVR32_OPC_RCALL1,
++ AVR32_OPC_RCALL2,
++ AVR32_OPC_RETEQ,
++ AVR32_OPC_RETNE,
++ AVR32_OPC_RETCC,
++ AVR32_OPC_RETCS,
++ AVR32_OPC_RETGE,
++ AVR32_OPC_RETLT,
++ AVR32_OPC_RETMI,
++ AVR32_OPC_RETPL,
++ AVR32_OPC_RETLS,
++ AVR32_OPC_RETGT,
++ AVR32_OPC_RETLE,
++ AVR32_OPC_RETHI,
++ AVR32_OPC_RETVS,
++ AVR32_OPC_RETVC,
++ AVR32_OPC_RETQS,
++ AVR32_OPC_RETAL,
++ AVR32_OPC_RETD,
++ AVR32_OPC_RETE,
++ AVR32_OPC_RETJ,
++ AVR32_OPC_RETS,
++ AVR32_OPC_RJMP,
++ AVR32_OPC_ROL,
++ AVR32_OPC_ROR,
++ AVR32_OPC_RSUB1,
++ AVR32_OPC_RSUB2,
++ AVR32_OPC_SATADD_H,
++ AVR32_OPC_SATADD_W,
++ AVR32_OPC_SATRNDS,
++ AVR32_OPC_SATRNDU,
++ AVR32_OPC_SATS,
++ AVR32_OPC_SATSUB_H,
++ AVR32_OPC_SATSUB_W1,
++ AVR32_OPC_SATSUB_W2,
++ AVR32_OPC_SATU,
++ AVR32_OPC_SBC,
++ AVR32_OPC_SBR,
++ AVR32_OPC_SCALL,
++ AVR32_OPC_SCR,
++ AVR32_OPC_SLEEP,
++ AVR32_OPC_SREQ,
++ AVR32_OPC_SRNE,
++ AVR32_OPC_SRCC,
++ AVR32_OPC_SRCS,
++ AVR32_OPC_SRGE,
++ AVR32_OPC_SRLT,
++ AVR32_OPC_SRMI,
++ AVR32_OPC_SRPL,
++ AVR32_OPC_SRLS,
++ AVR32_OPC_SRGT,
++ AVR32_OPC_SRLE,
++ AVR32_OPC_SRHI,
++ AVR32_OPC_SRVS,
++ AVR32_OPC_SRVC,
++ AVR32_OPC_SRQS,
++ AVR32_OPC_SRAL,
++ AVR32_OPC_SSRF,
++ AVR32_OPC_ST_B1,
++ AVR32_OPC_ST_B2,
++ AVR32_OPC_ST_B5,
++ AVR32_OPC_ST_B3,
++ AVR32_OPC_ST_B4,
++ AVR32_OPC_ST_D1,
++ AVR32_OPC_ST_D2,
++ AVR32_OPC_ST_D3,
++ AVR32_OPC_ST_D5,
++ AVR32_OPC_ST_D4,
++ AVR32_OPC_ST_H1,
++ AVR32_OPC_ST_H2,
++ AVR32_OPC_ST_H5,
++ AVR32_OPC_ST_H3,
++ AVR32_OPC_ST_H4,
++ AVR32_OPC_ST_W1,
++ AVR32_OPC_ST_W2,
++ AVR32_OPC_ST_W5,
++ AVR32_OPC_ST_W3,
++ AVR32_OPC_ST_W4,
++ AVR32_OPC_STC_D1,
++ AVR32_OPC_STC_D2,
++ AVR32_OPC_STC_D3,
++ AVR32_OPC_STC_W1,
++ AVR32_OPC_STC_W2,
++ AVR32_OPC_STC_W3,
++ AVR32_OPC_STC0_D,
++ AVR32_OPC_STC0_W,
++ AVR32_OPC_STCM_D,
++ AVR32_OPC_STCM_D_PU,
++ AVR32_OPC_STCM_W,
++ AVR32_OPC_STCM_W_PU,
++ AVR32_OPC_STCOND,
++ AVR32_OPC_STDSP,
++ AVR32_OPC_STHH_W2,
++ AVR32_OPC_STHH_W1,
++ AVR32_OPC_STM,
++ AVR32_OPC_STM_PU,
++ AVR32_OPC_STMTS,
++ AVR32_OPC_STMTS_PU,
++ AVR32_OPC_STSWP_H,
++ AVR32_OPC_STSWP_W,
++ AVR32_OPC_SUB1,
++ AVR32_OPC_SUB2,
++ AVR32_OPC_SUB5,
++ AVR32_OPC_SUB3_SP,
++ AVR32_OPC_SUB3,
++ AVR32_OPC_SUB4,
++ AVR32_OPC_SUBEQ,
++ AVR32_OPC_SUBNE,
++ AVR32_OPC_SUBCC,
++ AVR32_OPC_SUBCS,
++ AVR32_OPC_SUBGE,
++ AVR32_OPC_SUBLT,
++ AVR32_OPC_SUBMI,
++ AVR32_OPC_SUBPL,
++ AVR32_OPC_SUBLS,
++ AVR32_OPC_SUBGT,
++ AVR32_OPC_SUBLE,
++ AVR32_OPC_SUBHI,
++ AVR32_OPC_SUBVS,
++ AVR32_OPC_SUBVC,
++ AVR32_OPC_SUBQS,
++ AVR32_OPC_SUBAL,
++ AVR32_OPC_SUBFEQ,
++ AVR32_OPC_SUBFNE,
++ AVR32_OPC_SUBFCC,
++ AVR32_OPC_SUBFCS,
++ AVR32_OPC_SUBFGE,
++ AVR32_OPC_SUBFLT,
++ AVR32_OPC_SUBFMI,
++ AVR32_OPC_SUBFPL,
++ AVR32_OPC_SUBFLS,
++ AVR32_OPC_SUBFGT,
++ AVR32_OPC_SUBFLE,
++ AVR32_OPC_SUBFHI,
++ AVR32_OPC_SUBFVS,
++ AVR32_OPC_SUBFVC,
++ AVR32_OPC_SUBFQS,
++ AVR32_OPC_SUBFAL,
++ AVR32_OPC_SUBHH_W,
++ AVR32_OPC_SWAP_B,
++ AVR32_OPC_SWAP_BH,
++ AVR32_OPC_SWAP_H,
++ AVR32_OPC_SYNC,
++ AVR32_OPC_TLBR,
++ AVR32_OPC_TLBS,
++ AVR32_OPC_TLBW,
++ AVR32_OPC_TNBZ,
++ AVR32_OPC_TST,
++ AVR32_OPC_XCHG,
++ AVR32_OPC_MEMC,
++ AVR32_OPC_MEMS,
++ AVR32_OPC_MEMT,
++ AVR32_OPC_BFEXTS,
++ AVR32_OPC_BFEXTU,
++ AVR32_OPC_BFINS,
++ AVR32_OPC_RSUBEQ,
++ AVR32_OPC_RSUBNE,
++ AVR32_OPC_RSUBCC,
++ AVR32_OPC_RSUBCS,
++ AVR32_OPC_RSUBGE,
++ AVR32_OPC_RSUBLT,
++ AVR32_OPC_RSUBMI,
++ AVR32_OPC_RSUBPL,
++ AVR32_OPC_RSUBLS,
++ AVR32_OPC_RSUBGT,
++ AVR32_OPC_RSUBLE,
++ AVR32_OPC_RSUBHI,
++ AVR32_OPC_RSUBVS,
++ AVR32_OPC_RSUBVC,
++ AVR32_OPC_RSUBQS,
++ AVR32_OPC_RSUBAL,
++ AVR32_OPC_ADDEQ,
++ AVR32_OPC_ADDNE,
++ AVR32_OPC_ADDCC,
++ AVR32_OPC_ADDCS,
++ AVR32_OPC_ADDGE,
++ AVR32_OPC_ADDLT,
++ AVR32_OPC_ADDMI,
++ AVR32_OPC_ADDPL,
++ AVR32_OPC_ADDLS,
++ AVR32_OPC_ADDGT,
++ AVR32_OPC_ADDLE,
++ AVR32_OPC_ADDHI,
++ AVR32_OPC_ADDVS,
++ AVR32_OPC_ADDVC,
++ AVR32_OPC_ADDQS,
++ AVR32_OPC_ADDAL,
++ AVR32_OPC_SUB2EQ,
++ AVR32_OPC_SUB2NE,
++ AVR32_OPC_SUB2CC,
++ AVR32_OPC_SUB2CS,
++ AVR32_OPC_SUB2GE,
++ AVR32_OPC_SUB2LT,
++ AVR32_OPC_SUB2MI,
++ AVR32_OPC_SUB2PL,
++ AVR32_OPC_SUB2LS,
++ AVR32_OPC_SUB2GT,
++ AVR32_OPC_SUB2LE,
++ AVR32_OPC_SUB2HI,
++ AVR32_OPC_SUB2VS,
++ AVR32_OPC_SUB2VC,
++ AVR32_OPC_SUB2QS,
++ AVR32_OPC_SUB2AL,
++ AVR32_OPC_ANDEQ,
++ AVR32_OPC_ANDNE,
++ AVR32_OPC_ANDCC,
++ AVR32_OPC_ANDCS,
++ AVR32_OPC_ANDGE,
++ AVR32_OPC_ANDLT,
++ AVR32_OPC_ANDMI,
++ AVR32_OPC_ANDPL,
++ AVR32_OPC_ANDLS,
++ AVR32_OPC_ANDGT,
++ AVR32_OPC_ANDLE,
++ AVR32_OPC_ANDHI,
++ AVR32_OPC_ANDVS,
++ AVR32_OPC_ANDVC,
++ AVR32_OPC_ANDQS,
++ AVR32_OPC_ANDAL,
++ AVR32_OPC_OREQ,
++ AVR32_OPC_ORNE,
++ AVR32_OPC_ORCC,
++ AVR32_OPC_ORCS,
++ AVR32_OPC_ORGE,
++ AVR32_OPC_ORLT,
++ AVR32_OPC_ORMI,
++ AVR32_OPC_ORPL,
++ AVR32_OPC_ORLS,
++ AVR32_OPC_ORGT,
++ AVR32_OPC_ORLE,
++ AVR32_OPC_ORHI,
++ AVR32_OPC_ORVS,
++ AVR32_OPC_ORVC,
++ AVR32_OPC_ORQS,
++ AVR32_OPC_ORAL,
++ AVR32_OPC_EOREQ,
++ AVR32_OPC_EORNE,
++ AVR32_OPC_EORCC,
++ AVR32_OPC_EORCS,
++ AVR32_OPC_EORGE,
++ AVR32_OPC_EORLT,
++ AVR32_OPC_EORMI,
++ AVR32_OPC_EORPL,
++ AVR32_OPC_EORLS,
++ AVR32_OPC_EORGT,
++ AVR32_OPC_EORLE,
++ AVR32_OPC_EORHI,
++ AVR32_OPC_EORVS,
++ AVR32_OPC_EORVC,
++ AVR32_OPC_EORQS,
++ AVR32_OPC_EORAL,
++ AVR32_OPC_LD_WEQ,
++ AVR32_OPC_LD_WNE,
++ AVR32_OPC_LD_WCC,
++ AVR32_OPC_LD_WCS,
++ AVR32_OPC_LD_WGE,
++ AVR32_OPC_LD_WLT,
++ AVR32_OPC_LD_WMI,
++ AVR32_OPC_LD_WPL,
++ AVR32_OPC_LD_WLS,
++ AVR32_OPC_LD_WGT,
++ AVR32_OPC_LD_WLE,
++ AVR32_OPC_LD_WHI,
++ AVR32_OPC_LD_WVS,
++ AVR32_OPC_LD_WVC,
++ AVR32_OPC_LD_WQS,
++ AVR32_OPC_LD_WAL,
++ AVR32_OPC_LD_SHEQ,
++ AVR32_OPC_LD_SHNE,
++ AVR32_OPC_LD_SHCC,
++ AVR32_OPC_LD_SHCS,
++ AVR32_OPC_LD_SHGE,
++ AVR32_OPC_LD_SHLT,
++ AVR32_OPC_LD_SHMI,
++ AVR32_OPC_LD_SHPL,
++ AVR32_OPC_LD_SHLS,
++ AVR32_OPC_LD_SHGT,
++ AVR32_OPC_LD_SHLE,
++ AVR32_OPC_LD_SHHI,
++ AVR32_OPC_LD_SHVS,
++ AVR32_OPC_LD_SHVC,
++ AVR32_OPC_LD_SHQS,
++ AVR32_OPC_LD_SHAL,
++ AVR32_OPC_LD_UHEQ,
++ AVR32_OPC_LD_UHNE,
++ AVR32_OPC_LD_UHCC,
++ AVR32_OPC_LD_UHCS,
++ AVR32_OPC_LD_UHGE,
++ AVR32_OPC_LD_UHLT,
++ AVR32_OPC_LD_UHMI,
++ AVR32_OPC_LD_UHPL,
++ AVR32_OPC_LD_UHLS,
++ AVR32_OPC_LD_UHGT,
++ AVR32_OPC_LD_UHLE,
++ AVR32_OPC_LD_UHHI,
++ AVR32_OPC_LD_UHVS,
++ AVR32_OPC_LD_UHVC,
++ AVR32_OPC_LD_UHQS,
++ AVR32_OPC_LD_UHAL,
++ AVR32_OPC_LD_SBEQ,
++ AVR32_OPC_LD_SBNE,
++ AVR32_OPC_LD_SBCC,
++ AVR32_OPC_LD_SBCS,
++ AVR32_OPC_LD_SBGE,
++ AVR32_OPC_LD_SBLT,
++ AVR32_OPC_LD_SBMI,
++ AVR32_OPC_LD_SBPL,
++ AVR32_OPC_LD_SBLS,
++ AVR32_OPC_LD_SBGT,
++ AVR32_OPC_LD_SBLE,
++ AVR32_OPC_LD_SBHI,
++ AVR32_OPC_LD_SBVS,
++ AVR32_OPC_LD_SBVC,
++ AVR32_OPC_LD_SBQS,
++ AVR32_OPC_LD_SBAL,
++ AVR32_OPC_LD_UBEQ,
++ AVR32_OPC_LD_UBNE,
++ AVR32_OPC_LD_UBCC,
++ AVR32_OPC_LD_UBCS,
++ AVR32_OPC_LD_UBGE,
++ AVR32_OPC_LD_UBLT,
++ AVR32_OPC_LD_UBMI,
++ AVR32_OPC_LD_UBPL,
++ AVR32_OPC_LD_UBLS,
++ AVR32_OPC_LD_UBGT,
++ AVR32_OPC_LD_UBLE,
++ AVR32_OPC_LD_UBHI,
++ AVR32_OPC_LD_UBVS,
++ AVR32_OPC_LD_UBVC,
++ AVR32_OPC_LD_UBQS,
++ AVR32_OPC_LD_UBAL,
++ AVR32_OPC_ST_WEQ,
++ AVR32_OPC_ST_WNE,
++ AVR32_OPC_ST_WCC,
++ AVR32_OPC_ST_WCS,
++ AVR32_OPC_ST_WGE,
++ AVR32_OPC_ST_WLT,
++ AVR32_OPC_ST_WMI,
++ AVR32_OPC_ST_WPL,
++ AVR32_OPC_ST_WLS,
++ AVR32_OPC_ST_WGT,
++ AVR32_OPC_ST_WLE,
++ AVR32_OPC_ST_WHI,
++ AVR32_OPC_ST_WVS,
++ AVR32_OPC_ST_WVC,
++ AVR32_OPC_ST_WQS,
++ AVR32_OPC_ST_WAL,
++ AVR32_OPC_ST_HEQ,
++ AVR32_OPC_ST_HNE,
++ AVR32_OPC_ST_HCC,
++ AVR32_OPC_ST_HCS,
++ AVR32_OPC_ST_HGE,
++ AVR32_OPC_ST_HLT,
++ AVR32_OPC_ST_HMI,
++ AVR32_OPC_ST_HPL,
++ AVR32_OPC_ST_HLS,
++ AVR32_OPC_ST_HGT,
++ AVR32_OPC_ST_HLE,
++ AVR32_OPC_ST_HHI,
++ AVR32_OPC_ST_HVS,
++ AVR32_OPC_ST_HVC,
++ AVR32_OPC_ST_HQS,
++ AVR32_OPC_ST_HAL,
++ AVR32_OPC_ST_BEQ,
++ AVR32_OPC_ST_BNE,
++ AVR32_OPC_ST_BCC,
++ AVR32_OPC_ST_BCS,
++ AVR32_OPC_ST_BGE,
++ AVR32_OPC_ST_BLT,
++ AVR32_OPC_ST_BMI,
++ AVR32_OPC_ST_BPL,
++ AVR32_OPC_ST_BLS,
++ AVR32_OPC_ST_BGT,
++ AVR32_OPC_ST_BLE,
++ AVR32_OPC_ST_BHI,
++ AVR32_OPC_ST_BVS,
++ AVR32_OPC_ST_BVC,
++ AVR32_OPC_ST_BQS,
++ AVR32_OPC_ST_BAL,
++ AVR32_OPC_MOVH,
++ AVR32_OPC_SSCALL,
++ AVR32_OPC_RETSS,
++ AVR32_OPC_FMAC_S,
++ AVR32_OPC_FNMAC_S,
++ AVR32_OPC_FMSC_S,
++ AVR32_OPC_FNMSC_S,
++ AVR32_OPC_FMUL_S,
++ AVR32_OPC_FNMUL_S,
++ AVR32_OPC_FADD_S,
++ AVR32_OPC_FSUB_S,
++ AVR32_OPC_FCASTRS_SW,
++ AVR32_OPC_FCASTRS_UW,
++ AVR32_OPC_FCASTSW_S,
++ AVR32_OPC_FCASTUW_S,
++ AVR32_OPC_FCMP_S,
++ AVR32_OPC_FCHK_S,
++ AVR32_OPC_FRCPA_S,
++ AVR32_OPC_FRSQRTA_S,
++ AVR32_OPC__END_
++};
++#define AVR32_NR_OPCODES AVR32_OPC__END_
++
++enum avr32_syntax_type
++{
++ AVR32_SYNTAX_ABS,
++ AVR32_SYNTAX_ACALL,
++ AVR32_SYNTAX_ACR,
++ AVR32_SYNTAX_ADC,
++ AVR32_SYNTAX_ADD1,
++ AVR32_SYNTAX_ADD2,
++ AVR32_SYNTAX_ADDABS,
++ AVR32_SYNTAX_ADDHH_W,
++ AVR32_SYNTAX_AND1,
++ AVR32_SYNTAX_AND2,
++ AVR32_SYNTAX_AND3,
++ AVR32_SYNTAX_ANDH,
++ AVR32_SYNTAX_ANDH_COH,
++ AVR32_SYNTAX_ANDL,
++ AVR32_SYNTAX_ANDL_COH,
++ AVR32_SYNTAX_ANDN,
++ AVR32_SYNTAX_ASR1,
++ AVR32_SYNTAX_ASR3,
++ AVR32_SYNTAX_ASR2,
++ AVR32_SYNTAX_BFEXTS,
++ AVR32_SYNTAX_BFEXTU,
++ AVR32_SYNTAX_BFINS,
++ AVR32_SYNTAX_BLD,
++ AVR32_SYNTAX_BREQ1,
++ AVR32_SYNTAX_BRNE1,
++ AVR32_SYNTAX_BRCC1,
++ AVR32_SYNTAX_BRCS1,
++ AVR32_SYNTAX_BRGE1,
++ AVR32_SYNTAX_BRLT1,
++ AVR32_SYNTAX_BRMI1,
++ AVR32_SYNTAX_BRPL1,
++ AVR32_SYNTAX_BRHS1,
++ AVR32_SYNTAX_BRLO1,
++ AVR32_SYNTAX_BREQ2,
++ AVR32_SYNTAX_BRNE2,
++ AVR32_SYNTAX_BRCC2,
++ AVR32_SYNTAX_BRCS2,
++ AVR32_SYNTAX_BRGE2,
++ AVR32_SYNTAX_BRLT2,
++ AVR32_SYNTAX_BRMI2,
++ AVR32_SYNTAX_BRPL2,
++ AVR32_SYNTAX_BRLS,
++ AVR32_SYNTAX_BRGT,
++ AVR32_SYNTAX_BRLE,
++ AVR32_SYNTAX_BRHI,
++ AVR32_SYNTAX_BRVS,
++ AVR32_SYNTAX_BRVC,
++ AVR32_SYNTAX_BRQS,
++ AVR32_SYNTAX_BRAL,
++ AVR32_SYNTAX_BRHS2,
++ AVR32_SYNTAX_BRLO2,
++ AVR32_SYNTAX_BREAKPOINT,
++ AVR32_SYNTAX_BREV,
++ AVR32_SYNTAX_BST,
++ AVR32_SYNTAX_CACHE,
++ AVR32_SYNTAX_CASTS_B,
++ AVR32_SYNTAX_CASTS_H,
++ AVR32_SYNTAX_CASTU_B,
++ AVR32_SYNTAX_CASTU_H,
++ AVR32_SYNTAX_CBR,
++ AVR32_SYNTAX_CLZ,
++ AVR32_SYNTAX_COM,
++ AVR32_SYNTAX_COP,
++ AVR32_SYNTAX_CP_B,
++ AVR32_SYNTAX_CP_H,
++ AVR32_SYNTAX_CP_W1,
++ AVR32_SYNTAX_CP_W2,
++ AVR32_SYNTAX_CP_W3,
++ AVR32_SYNTAX_CPC1,
++ AVR32_SYNTAX_CPC2,
++ AVR32_SYNTAX_CSRF,
++ AVR32_SYNTAX_CSRFCZ,
++ AVR32_SYNTAX_DIVS,
++ AVR32_SYNTAX_DIVU,
++ AVR32_SYNTAX_EOR1,
++ AVR32_SYNTAX_EOR2,
++ AVR32_SYNTAX_EOR3,
++ AVR32_SYNTAX_EORL,
++ AVR32_SYNTAX_EORH,
++ AVR32_SYNTAX_FRS,
++ AVR32_SYNTAX_SSCALL,
++ AVR32_SYNTAX_RETSS,
++ AVR32_SYNTAX_ICALL,
++ AVR32_SYNTAX_INCJOSP,
++ AVR32_SYNTAX_LD_D1,
++ AVR32_SYNTAX_LD_D2,
++ AVR32_SYNTAX_LD_D3,
++ AVR32_SYNTAX_LD_D5,
++ AVR32_SYNTAX_LD_D4,
++ AVR32_SYNTAX_LD_SB2,
++ AVR32_SYNTAX_LD_SB1,
++ AVR32_SYNTAX_LD_UB1,
++ AVR32_SYNTAX_LD_UB2,
++ AVR32_SYNTAX_LD_UB5,
++ AVR32_SYNTAX_LD_UB3,
++ AVR32_SYNTAX_LD_UB4,
++ AVR32_SYNTAX_LD_SH1,
++ AVR32_SYNTAX_LD_SH2,
++ AVR32_SYNTAX_LD_SH5,
++ AVR32_SYNTAX_LD_SH3,
++ AVR32_SYNTAX_LD_SH4,
++ AVR32_SYNTAX_LD_UH1,
++ AVR32_SYNTAX_LD_UH2,
++ AVR32_SYNTAX_LD_UH5,
++ AVR32_SYNTAX_LD_UH3,
++ AVR32_SYNTAX_LD_UH4,
++ AVR32_SYNTAX_LD_W1,
++ AVR32_SYNTAX_LD_W2,
++ AVR32_SYNTAX_LD_W5,
++ AVR32_SYNTAX_LD_W6,
++ AVR32_SYNTAX_LD_W3,
++ AVR32_SYNTAX_LD_W4,
++ AVR32_SYNTAX_LDC_D1,
++ AVR32_SYNTAX_LDC_D2,
++ AVR32_SYNTAX_LDC_D3,
++ AVR32_SYNTAX_LDC_W1,
++ AVR32_SYNTAX_LDC_W2,
++ AVR32_SYNTAX_LDC_W3,
++ AVR32_SYNTAX_LDC0_D,
++ AVR32_SYNTAX_LDC0_W,
++ AVR32_SYNTAX_LDCM_D,
++ AVR32_SYNTAX_LDCM_D_PU,
++ AVR32_SYNTAX_LDCM_W,
++ AVR32_SYNTAX_LDCM_W_PU,
++ AVR32_SYNTAX_LDDPC,
++ AVR32_SYNTAX_LDDPC_EXT,
++ AVR32_SYNTAX_LDDSP,
++ AVR32_SYNTAX_LDINS_B,
++ AVR32_SYNTAX_LDINS_H,
++ AVR32_SYNTAX_LDM,
++ AVR32_SYNTAX_LDMTS,
++ AVR32_SYNTAX_LDMTS_PU,
++ AVR32_SYNTAX_LDSWP_SH,
++ AVR32_SYNTAX_LDSWP_UH,
++ AVR32_SYNTAX_LDSWP_W,
++ AVR32_SYNTAX_LSL1,
++ AVR32_SYNTAX_LSL3,
++ AVR32_SYNTAX_LSL2,
++ AVR32_SYNTAX_LSR1,
++ AVR32_SYNTAX_LSR3,
++ AVR32_SYNTAX_LSR2,
++ AVR32_SYNTAX_MAC,
++ AVR32_SYNTAX_MACHH_D,
++ AVR32_SYNTAX_MACHH_W,
++ AVR32_SYNTAX_MACS_D,
++ AVR32_SYNTAX_MACSATHH_W,
++ AVR32_SYNTAX_MACUD,
++ AVR32_SYNTAX_MACWH_D,
++ AVR32_SYNTAX_MAX,
++ AVR32_SYNTAX_MCALL,
++ AVR32_SYNTAX_MFDR,
++ AVR32_SYNTAX_MFSR,
++ AVR32_SYNTAX_MIN,
++ AVR32_SYNTAX_MOV3,
++ AVR32_SYNTAX_MOV1,
++ AVR32_SYNTAX_MOV2,
++ AVR32_SYNTAX_MOVEQ1,
++ AVR32_SYNTAX_MOVNE1,
++ AVR32_SYNTAX_MOVCC1,
++ AVR32_SYNTAX_MOVCS1,
++ AVR32_SYNTAX_MOVGE1,
++ AVR32_SYNTAX_MOVLT1,
++ AVR32_SYNTAX_MOVMI1,
++ AVR32_SYNTAX_MOVPL1,
++ AVR32_SYNTAX_MOVLS1,
++ AVR32_SYNTAX_MOVGT1,
++ AVR32_SYNTAX_MOVLE1,
++ AVR32_SYNTAX_MOVHI1,
++ AVR32_SYNTAX_MOVVS1,
++ AVR32_SYNTAX_MOVVC1,
++ AVR32_SYNTAX_MOVQS1,
++ AVR32_SYNTAX_MOVAL1,
++ AVR32_SYNTAX_MOVHS1,
++ AVR32_SYNTAX_MOVLO1,
++ AVR32_SYNTAX_MOVEQ2,
++ AVR32_SYNTAX_MOVNE2,
++ AVR32_SYNTAX_MOVCC2,
++ AVR32_SYNTAX_MOVCS2,
++ AVR32_SYNTAX_MOVGE2,
++ AVR32_SYNTAX_MOVLT2,
++ AVR32_SYNTAX_MOVMI2,
++ AVR32_SYNTAX_MOVPL2,
++ AVR32_SYNTAX_MOVLS2,
++ AVR32_SYNTAX_MOVGT2,
++ AVR32_SYNTAX_MOVLE2,
++ AVR32_SYNTAX_MOVHI2,
++ AVR32_SYNTAX_MOVVS2,
++ AVR32_SYNTAX_MOVVC2,
++ AVR32_SYNTAX_MOVQS2,
++ AVR32_SYNTAX_MOVAL2,
++ AVR32_SYNTAX_MOVHS2,
++ AVR32_SYNTAX_MOVLO2,
++ AVR32_SYNTAX_MTDR,
++ AVR32_SYNTAX_MTSR,
++ AVR32_SYNTAX_MUL1,
++ AVR32_SYNTAX_MUL2,
++ AVR32_SYNTAX_MUL3,
++ AVR32_SYNTAX_MULHH_W,
++ AVR32_SYNTAX_MULNHH_W,
++ AVR32_SYNTAX_MULNWH_D,
++ AVR32_SYNTAX_MULSD,
++ AVR32_SYNTAX_MULSATHH_H,
++ AVR32_SYNTAX_MULSATHH_W,
++ AVR32_SYNTAX_MULSATRNDHH_H,
++ AVR32_SYNTAX_MULSATRNDWH_W,
++ AVR32_SYNTAX_MULSATWH_W,
++ AVR32_SYNTAX_MULU_D,
++ AVR32_SYNTAX_MULWH_D,
++ AVR32_SYNTAX_MUSFR,
++ AVR32_SYNTAX_MUSTR,
++ AVR32_SYNTAX_MVCR_D,
++ AVR32_SYNTAX_MVCR_W,
++ AVR32_SYNTAX_MVRC_D,
++ AVR32_SYNTAX_MVRC_W,
++ AVR32_SYNTAX_NEG,
++ AVR32_SYNTAX_NOP,
++ AVR32_SYNTAX_OR1,
++ AVR32_SYNTAX_OR2,
++ AVR32_SYNTAX_OR3,
++ AVR32_SYNTAX_ORH,
++ AVR32_SYNTAX_ORL,
++ AVR32_SYNTAX_PABS_SB,
++ AVR32_SYNTAX_PABS_SH,
++ AVR32_SYNTAX_PACKSH_SB,
++ AVR32_SYNTAX_PACKSH_UB,
++ AVR32_SYNTAX_PACKW_SH,
++ AVR32_SYNTAX_PADD_B,
++ AVR32_SYNTAX_PADD_H,
++ AVR32_SYNTAX_PADDH_SH,
++ AVR32_SYNTAX_PADDH_UB,
++ AVR32_SYNTAX_PADDS_SB,
++ AVR32_SYNTAX_PADDS_SH,
++ AVR32_SYNTAX_PADDS_UB,
++ AVR32_SYNTAX_PADDS_UH,
++ AVR32_SYNTAX_PADDSUB_H,
++ AVR32_SYNTAX_PADDSUBH_SH,
++ AVR32_SYNTAX_PADDSUBS_SH,
++ AVR32_SYNTAX_PADDSUBS_UH,
++ AVR32_SYNTAX_PADDX_H,
++ AVR32_SYNTAX_PADDXH_SH,
++ AVR32_SYNTAX_PADDXS_SH,
++ AVR32_SYNTAX_PADDXS_UH,
++ AVR32_SYNTAX_PASR_B,
++ AVR32_SYNTAX_PASR_H,
++ AVR32_SYNTAX_PAVG_SH,
++ AVR32_SYNTAX_PAVG_UB,
++ AVR32_SYNTAX_PLSL_B,
++ AVR32_SYNTAX_PLSL_H,
++ AVR32_SYNTAX_PLSR_B,
++ AVR32_SYNTAX_PLSR_H,
++ AVR32_SYNTAX_PMAX_SH,
++ AVR32_SYNTAX_PMAX_UB,
++ AVR32_SYNTAX_PMIN_SH,
++ AVR32_SYNTAX_PMIN_UB,
++ AVR32_SYNTAX_POPJC,
++ AVR32_SYNTAX_POPM,
++ AVR32_SYNTAX_POPM_E,
++ AVR32_SYNTAX_PREF,
++ AVR32_SYNTAX_PSAD,
++ AVR32_SYNTAX_PSUB_B,
++ AVR32_SYNTAX_PSUB_H,
++ AVR32_SYNTAX_PSUBADD_H,
++ AVR32_SYNTAX_PSUBADDH_SH,
++ AVR32_SYNTAX_PSUBADDS_SH,
++ AVR32_SYNTAX_PSUBADDS_UH,
++ AVR32_SYNTAX_PSUBH_SH,
++ AVR32_SYNTAX_PSUBH_UB,
++ AVR32_SYNTAX_PSUBS_SB,
++ AVR32_SYNTAX_PSUBS_SH,
++ AVR32_SYNTAX_PSUBS_UB,
++ AVR32_SYNTAX_PSUBS_UH,
++ AVR32_SYNTAX_PSUBX_H,
++ AVR32_SYNTAX_PSUBXH_SH,
++ AVR32_SYNTAX_PSUBXS_SH,
++ AVR32_SYNTAX_PSUBXS_UH,
++ AVR32_SYNTAX_PUNPCKSB_H,
++ AVR32_SYNTAX_PUNPCKUB_H,
++ AVR32_SYNTAX_PUSHJC,
++ AVR32_SYNTAX_PUSHM,
++ AVR32_SYNTAX_PUSHM_E,
++ AVR32_SYNTAX_RCALL1,
++ AVR32_SYNTAX_RCALL2,
++ AVR32_SYNTAX_RETEQ,
++ AVR32_SYNTAX_RETNE,
++ AVR32_SYNTAX_RETCC,
++ AVR32_SYNTAX_RETCS,
++ AVR32_SYNTAX_RETGE,
++ AVR32_SYNTAX_RETLT,
++ AVR32_SYNTAX_RETMI,
++ AVR32_SYNTAX_RETPL,
++ AVR32_SYNTAX_RETLS,
++ AVR32_SYNTAX_RETGT,
++ AVR32_SYNTAX_RETLE,
++ AVR32_SYNTAX_RETHI,
++ AVR32_SYNTAX_RETVS,
++ AVR32_SYNTAX_RETVC,
++ AVR32_SYNTAX_RETQS,
++ AVR32_SYNTAX_RETAL,
++ AVR32_SYNTAX_RETHS,
++ AVR32_SYNTAX_RETLO,
++ AVR32_SYNTAX_RETD,
++ AVR32_SYNTAX_RETE,
++ AVR32_SYNTAX_RETJ,
++ AVR32_SYNTAX_RETS,
++ AVR32_SYNTAX_RJMP,
++ AVR32_SYNTAX_ROL,
++ AVR32_SYNTAX_ROR,
++ AVR32_SYNTAX_RSUB1,
++ AVR32_SYNTAX_RSUB2,
++ AVR32_SYNTAX_SATADD_H,
++ AVR32_SYNTAX_SATADD_W,
++ AVR32_SYNTAX_SATRNDS,
++ AVR32_SYNTAX_SATRNDU,
++ AVR32_SYNTAX_SATS,
++ AVR32_SYNTAX_SATSUB_H,
++ AVR32_SYNTAX_SATSUB_W1,
++ AVR32_SYNTAX_SATSUB_W2,
++ AVR32_SYNTAX_SATU,
++ AVR32_SYNTAX_SBC,
++ AVR32_SYNTAX_SBR,
++ AVR32_SYNTAX_SCALL,
++ AVR32_SYNTAX_SCR,
++ AVR32_SYNTAX_SLEEP,
++ AVR32_SYNTAX_SREQ,
++ AVR32_SYNTAX_SRNE,
++ AVR32_SYNTAX_SRCC,
++ AVR32_SYNTAX_SRCS,
++ AVR32_SYNTAX_SRGE,
++ AVR32_SYNTAX_SRLT,
++ AVR32_SYNTAX_SRMI,
++ AVR32_SYNTAX_SRPL,
++ AVR32_SYNTAX_SRLS,
++ AVR32_SYNTAX_SRGT,
++ AVR32_SYNTAX_SRLE,
++ AVR32_SYNTAX_SRHI,
++ AVR32_SYNTAX_SRVS,
++ AVR32_SYNTAX_SRVC,
++ AVR32_SYNTAX_SRQS,
++ AVR32_SYNTAX_SRAL,
++ AVR32_SYNTAX_SRHS,
++ AVR32_SYNTAX_SRLO,
++ AVR32_SYNTAX_SSRF,
++ AVR32_SYNTAX_ST_B1,
++ AVR32_SYNTAX_ST_B2,
++ AVR32_SYNTAX_ST_B5,
++ AVR32_SYNTAX_ST_B3,
++ AVR32_SYNTAX_ST_B4,
++ AVR32_SYNTAX_ST_D1,
++ AVR32_SYNTAX_ST_D2,
++ AVR32_SYNTAX_ST_D3,
++ AVR32_SYNTAX_ST_D5,
++ AVR32_SYNTAX_ST_D4,
++ AVR32_SYNTAX_ST_H1,
++ AVR32_SYNTAX_ST_H2,
++ AVR32_SYNTAX_ST_H5,
++ AVR32_SYNTAX_ST_H3,
++ AVR32_SYNTAX_ST_H4,
++ AVR32_SYNTAX_ST_W1,
++ AVR32_SYNTAX_ST_W2,
++ AVR32_SYNTAX_ST_W5,
++ AVR32_SYNTAX_ST_W3,
++ AVR32_SYNTAX_ST_W4,
++ AVR32_SYNTAX_STC_D1,
++ AVR32_SYNTAX_STC_D2,
++ AVR32_SYNTAX_STC_D3,
++ AVR32_SYNTAX_STC_W1,
++ AVR32_SYNTAX_STC_W2,
++ AVR32_SYNTAX_STC_W3,
++ AVR32_SYNTAX_STC0_D,
++ AVR32_SYNTAX_STC0_W,
++ AVR32_SYNTAX_STCM_D,
++ AVR32_SYNTAX_STCM_D_PU,
++ AVR32_SYNTAX_STCM_W,
++ AVR32_SYNTAX_STCM_W_PU,
++ AVR32_SYNTAX_STCOND,
++ AVR32_SYNTAX_STDSP,
++ AVR32_SYNTAX_STHH_W2,
++ AVR32_SYNTAX_STHH_W1,
++ AVR32_SYNTAX_STM,
++ AVR32_SYNTAX_STM_PU,
++ AVR32_SYNTAX_STMTS,
++ AVR32_SYNTAX_STMTS_PU,
++ AVR32_SYNTAX_STSWP_H,
++ AVR32_SYNTAX_STSWP_W,
++ AVR32_SYNTAX_SUB1,
++ AVR32_SYNTAX_SUB2,
++ AVR32_SYNTAX_SUB5,
++ AVR32_SYNTAX_SUB3_SP,
++ AVR32_SYNTAX_SUB3,
++ AVR32_SYNTAX_SUB4,
++ AVR32_SYNTAX_SUBEQ,
++ AVR32_SYNTAX_SUBNE,
++ AVR32_SYNTAX_SUBCC,
++ AVR32_SYNTAX_SUBCS,
++ AVR32_SYNTAX_SUBGE,
++ AVR32_SYNTAX_SUBLT,
++ AVR32_SYNTAX_SUBMI,
++ AVR32_SYNTAX_SUBPL,
++ AVR32_SYNTAX_SUBLS,
++ AVR32_SYNTAX_SUBGT,
++ AVR32_SYNTAX_SUBLE,
++ AVR32_SYNTAX_SUBHI,
++ AVR32_SYNTAX_SUBVS,
++ AVR32_SYNTAX_SUBVC,
++ AVR32_SYNTAX_SUBQS,
++ AVR32_SYNTAX_SUBAL,
++ AVR32_SYNTAX_SUBHS,
++ AVR32_SYNTAX_SUBLO,
++ AVR32_SYNTAX_SUBFEQ,
++ AVR32_SYNTAX_SUBFNE,
++ AVR32_SYNTAX_SUBFCC,
++ AVR32_SYNTAX_SUBFCS,
++ AVR32_SYNTAX_SUBFGE,
++ AVR32_SYNTAX_SUBFLT,
++ AVR32_SYNTAX_SUBFMI,
++ AVR32_SYNTAX_SUBFPL,
++ AVR32_SYNTAX_SUBFLS,
++ AVR32_SYNTAX_SUBFGT,
++ AVR32_SYNTAX_SUBFLE,
++ AVR32_SYNTAX_SUBFHI,
++ AVR32_SYNTAX_SUBFVS,
++ AVR32_SYNTAX_SUBFVC,
++ AVR32_SYNTAX_SUBFQS,
++ AVR32_SYNTAX_SUBFAL,
++ AVR32_SYNTAX_SUBFHS,
++ AVR32_SYNTAX_SUBFLO,
++ AVR32_SYNTAX_SUBHH_W,
++ AVR32_SYNTAX_SWAP_B,
++ AVR32_SYNTAX_SWAP_BH,
++ AVR32_SYNTAX_SWAP_H,
++ AVR32_SYNTAX_SYNC,
++ AVR32_SYNTAX_TLBR,
++ AVR32_SYNTAX_TLBS,
++ AVR32_SYNTAX_TLBW,
++ AVR32_SYNTAX_TNBZ,
++ AVR32_SYNTAX_TST,
++ AVR32_SYNTAX_XCHG,
++ AVR32_SYNTAX_MEMC,
++ AVR32_SYNTAX_MEMS,
++ AVR32_SYNTAX_MEMT,
++ AVR32_SYNTAX_FMAC_S,
++ AVR32_SYNTAX_FNMAC_S,
++ AVR32_SYNTAX_FMSC_S,
++ AVR32_SYNTAX_FNMSC_S,
++ AVR32_SYNTAX_FMUL_S,
++ AVR32_SYNTAX_FNMUL_S,
++ AVR32_SYNTAX_FADD_S,
++ AVR32_SYNTAX_FSUB_S,
++ AVR32_SYNTAX_FCASTRS_SW,
++ AVR32_SYNTAX_FCASTRS_UW,
++ AVR32_SYNTAX_FCASTSW_S,
++ AVR32_SYNTAX_FCASTUW_S,
++ AVR32_SYNTAX_FCMP_S,
++ AVR32_SYNTAX_FCHK_S,
++ AVR32_SYNTAX_FRCPA_S,
++ AVR32_SYNTAX_FRSQRTA_S,
++ AVR32_SYNTAX_LDA_W,
++ AVR32_SYNTAX_CALL,
++ AVR32_SYNTAX_PICOSVMAC0,
++ AVR32_SYNTAX_PICOSVMAC1,
++ AVR32_SYNTAX_PICOSVMAC2,
++ AVR32_SYNTAX_PICOSVMAC3,
++ AVR32_SYNTAX_PICOSVMUL0,
++ AVR32_SYNTAX_PICOSVMUL1,
++ AVR32_SYNTAX_PICOSVMUL2,
++ AVR32_SYNTAX_PICOSVMUL3,
++ AVR32_SYNTAX_PICOVMAC0,
++ AVR32_SYNTAX_PICOVMAC1,
++ AVR32_SYNTAX_PICOVMAC2,
++ AVR32_SYNTAX_PICOVMAC3,
++ AVR32_SYNTAX_PICOVMUL0,
++ AVR32_SYNTAX_PICOVMUL1,
++ AVR32_SYNTAX_PICOVMUL2,
++ AVR32_SYNTAX_PICOVMUL3,
++ AVR32_SYNTAX_PICOLD_D2,
++ AVR32_SYNTAX_PICOLD_D3,
++ AVR32_SYNTAX_PICOLD_D1,
++ AVR32_SYNTAX_PICOLD_W2,
++ AVR32_SYNTAX_PICOLD_W3,
++ AVR32_SYNTAX_PICOLD_W1,
++ AVR32_SYNTAX_PICOLDM_D,
++ AVR32_SYNTAX_PICOLDM_D_PU,
++ AVR32_SYNTAX_PICOLDM_W,
++ AVR32_SYNTAX_PICOLDM_W_PU,
++ AVR32_SYNTAX_PICOMV_D1,
++ AVR32_SYNTAX_PICOMV_D2,
++ AVR32_SYNTAX_PICOMV_W1,
++ AVR32_SYNTAX_PICOMV_W2,
++ AVR32_SYNTAX_PICOST_D2,
++ AVR32_SYNTAX_PICOST_D3,
++ AVR32_SYNTAX_PICOST_D1,
++ AVR32_SYNTAX_PICOST_W2,
++ AVR32_SYNTAX_PICOST_W3,
++ AVR32_SYNTAX_PICOST_W1,
++ AVR32_SYNTAX_PICOSTM_D,
++ AVR32_SYNTAX_PICOSTM_D_PU,
++ AVR32_SYNTAX_PICOSTM_W,
++ AVR32_SYNTAX_PICOSTM_W_PU,
++ AVR32_SYNTAX_RSUBEQ,
++ AVR32_SYNTAX_RSUBNE,
++ AVR32_SYNTAX_RSUBCC,
++ AVR32_SYNTAX_RSUBCS,
++ AVR32_SYNTAX_RSUBGE,
++ AVR32_SYNTAX_RSUBLT,
++ AVR32_SYNTAX_RSUBMI,
++ AVR32_SYNTAX_RSUBPL,
++ AVR32_SYNTAX_RSUBLS,
++ AVR32_SYNTAX_RSUBGT,
++ AVR32_SYNTAX_RSUBLE,
++ AVR32_SYNTAX_RSUBHI,
++ AVR32_SYNTAX_RSUBVS,
++ AVR32_SYNTAX_RSUBVC,
++ AVR32_SYNTAX_RSUBQS,
++ AVR32_SYNTAX_RSUBAL,
++ AVR32_SYNTAX_RSUBHS,
++ AVR32_SYNTAX_RSUBLO,
++ AVR32_SYNTAX_ADDEQ,
++ AVR32_SYNTAX_ADDNE,
++ AVR32_SYNTAX_ADDCC,
++ AVR32_SYNTAX_ADDCS,
++ AVR32_SYNTAX_ADDGE,
++ AVR32_SYNTAX_ADDLT,
++ AVR32_SYNTAX_ADDMI,
++ AVR32_SYNTAX_ADDPL,
++ AVR32_SYNTAX_ADDLS,
++ AVR32_SYNTAX_ADDGT,
++ AVR32_SYNTAX_ADDLE,
++ AVR32_SYNTAX_ADDHI,
++ AVR32_SYNTAX_ADDVS,
++ AVR32_SYNTAX_ADDVC,
++ AVR32_SYNTAX_ADDQS,
++ AVR32_SYNTAX_ADDAL,
++ AVR32_SYNTAX_ADDHS,
++ AVR32_SYNTAX_ADDLO,
++ AVR32_SYNTAX_SUB2EQ,
++ AVR32_SYNTAX_SUB2NE,
++ AVR32_SYNTAX_SUB2CC,
++ AVR32_SYNTAX_SUB2CS,
++ AVR32_SYNTAX_SUB2GE,
++ AVR32_SYNTAX_SUB2LT,
++ AVR32_SYNTAX_SUB2MI,
++ AVR32_SYNTAX_SUB2PL,
++ AVR32_SYNTAX_SUB2LS,
++ AVR32_SYNTAX_SUB2GT,
++ AVR32_SYNTAX_SUB2LE,
++ AVR32_SYNTAX_SUB2HI,
++ AVR32_SYNTAX_SUB2VS,
++ AVR32_SYNTAX_SUB2VC,
++ AVR32_SYNTAX_SUB2QS,
++ AVR32_SYNTAX_SUB2AL,
++ AVR32_SYNTAX_SUB2HS,
++ AVR32_SYNTAX_SUB2LO,
++ AVR32_SYNTAX_ANDEQ,
++ AVR32_SYNTAX_ANDNE,
++ AVR32_SYNTAX_ANDCC,
++ AVR32_SYNTAX_ANDCS,
++ AVR32_SYNTAX_ANDGE,
++ AVR32_SYNTAX_ANDLT,
++ AVR32_SYNTAX_ANDMI,
++ AVR32_SYNTAX_ANDPL,
++ AVR32_SYNTAX_ANDLS,
++ AVR32_SYNTAX_ANDGT,
++ AVR32_SYNTAX_ANDLE,
++ AVR32_SYNTAX_ANDHI,
++ AVR32_SYNTAX_ANDVS,
++ AVR32_SYNTAX_ANDVC,
++ AVR32_SYNTAX_ANDQS,
++ AVR32_SYNTAX_ANDAL,
++ AVR32_SYNTAX_ANDHS,
++ AVR32_SYNTAX_ANDLO,
++ AVR32_SYNTAX_OREQ,
++ AVR32_SYNTAX_ORNE,
++ AVR32_SYNTAX_ORCC,
++ AVR32_SYNTAX_ORCS,
++ AVR32_SYNTAX_ORGE,
++ AVR32_SYNTAX_ORLT,
++ AVR32_SYNTAX_ORMI,
++ AVR32_SYNTAX_ORPL,
++ AVR32_SYNTAX_ORLS,
++ AVR32_SYNTAX_ORGT,
++ AVR32_SYNTAX_ORLE,
++ AVR32_SYNTAX_ORHI,
++ AVR32_SYNTAX_ORVS,
++ AVR32_SYNTAX_ORVC,
++ AVR32_SYNTAX_ORQS,
++ AVR32_SYNTAX_ORAL,
++ AVR32_SYNTAX_ORHS,
++ AVR32_SYNTAX_ORLO,
++ AVR32_SYNTAX_EOREQ,
++ AVR32_SYNTAX_EORNE,
++ AVR32_SYNTAX_EORCC,
++ AVR32_SYNTAX_EORCS,
++ AVR32_SYNTAX_EORGE,
++ AVR32_SYNTAX_EORLT,
++ AVR32_SYNTAX_EORMI,
++ AVR32_SYNTAX_EORPL,
++ AVR32_SYNTAX_EORLS,
++ AVR32_SYNTAX_EORGT,
++ AVR32_SYNTAX_EORLE,
++ AVR32_SYNTAX_EORHI,
++ AVR32_SYNTAX_EORVS,
++ AVR32_SYNTAX_EORVC,
++ AVR32_SYNTAX_EORQS,
++ AVR32_SYNTAX_EORAL,
++ AVR32_SYNTAX_EORHS,
++ AVR32_SYNTAX_EORLO,
++ AVR32_SYNTAX_LD_WEQ,
++ AVR32_SYNTAX_LD_WNE,
++ AVR32_SYNTAX_LD_WCC,
++ AVR32_SYNTAX_LD_WCS,
++ AVR32_SYNTAX_LD_WGE,
++ AVR32_SYNTAX_LD_WLT,
++ AVR32_SYNTAX_LD_WMI,
++ AVR32_SYNTAX_LD_WPL,
++ AVR32_SYNTAX_LD_WLS,
++ AVR32_SYNTAX_LD_WGT,
++ AVR32_SYNTAX_LD_WLE,
++ AVR32_SYNTAX_LD_WHI,
++ AVR32_SYNTAX_LD_WVS,
++ AVR32_SYNTAX_LD_WVC,
++ AVR32_SYNTAX_LD_WQS,
++ AVR32_SYNTAX_LD_WAL,
++ AVR32_SYNTAX_LD_WHS,
++ AVR32_SYNTAX_LD_WLO,
++ AVR32_SYNTAX_LD_SHEQ,
++ AVR32_SYNTAX_LD_SHNE,
++ AVR32_SYNTAX_LD_SHCC,
++ AVR32_SYNTAX_LD_SHCS,
++ AVR32_SYNTAX_LD_SHGE,
++ AVR32_SYNTAX_LD_SHLT,
++ AVR32_SYNTAX_LD_SHMI,
++ AVR32_SYNTAX_LD_SHPL,
++ AVR32_SYNTAX_LD_SHLS,
++ AVR32_SYNTAX_LD_SHGT,
++ AVR32_SYNTAX_LD_SHLE,
++ AVR32_SYNTAX_LD_SHHI,
++ AVR32_SYNTAX_LD_SHVS,
++ AVR32_SYNTAX_LD_SHVC,
++ AVR32_SYNTAX_LD_SHQS,
++ AVR32_SYNTAX_LD_SHAL,
++ AVR32_SYNTAX_LD_SHHS,
++ AVR32_SYNTAX_LD_SHLO,
++ AVR32_SYNTAX_LD_UHEQ,
++ AVR32_SYNTAX_LD_UHNE,
++ AVR32_SYNTAX_LD_UHCC,
++ AVR32_SYNTAX_LD_UHCS,
++ AVR32_SYNTAX_LD_UHGE,
++ AVR32_SYNTAX_LD_UHLT,
++ AVR32_SYNTAX_LD_UHMI,
++ AVR32_SYNTAX_LD_UHPL,
++ AVR32_SYNTAX_LD_UHLS,
++ AVR32_SYNTAX_LD_UHGT,
++ AVR32_SYNTAX_LD_UHLE,
++ AVR32_SYNTAX_LD_UHHI,
++ AVR32_SYNTAX_LD_UHVS,
++ AVR32_SYNTAX_LD_UHVC,
++ AVR32_SYNTAX_LD_UHQS,
++ AVR32_SYNTAX_LD_UHAL,
++ AVR32_SYNTAX_LD_UHHS,
++ AVR32_SYNTAX_LD_UHLO,
++ AVR32_SYNTAX_LD_SBEQ,
++ AVR32_SYNTAX_LD_SBNE,
++ AVR32_SYNTAX_LD_SBCC,
++ AVR32_SYNTAX_LD_SBCS,
++ AVR32_SYNTAX_LD_SBGE,
++ AVR32_SYNTAX_LD_SBLT,
++ AVR32_SYNTAX_LD_SBMI,
++ AVR32_SYNTAX_LD_SBPL,
++ AVR32_SYNTAX_LD_SBLS,
++ AVR32_SYNTAX_LD_SBGT,
++ AVR32_SYNTAX_LD_SBLE,
++ AVR32_SYNTAX_LD_SBHI,
++ AVR32_SYNTAX_LD_SBVS,
++ AVR32_SYNTAX_LD_SBVC,
++ AVR32_SYNTAX_LD_SBQS,
++ AVR32_SYNTAX_LD_SBAL,
++ AVR32_SYNTAX_LD_SBHS,
++ AVR32_SYNTAX_LD_SBLO,
++ AVR32_SYNTAX_LD_UBEQ,
++ AVR32_SYNTAX_LD_UBNE,
++ AVR32_SYNTAX_LD_UBCC,
++ AVR32_SYNTAX_LD_UBCS,
++ AVR32_SYNTAX_LD_UBGE,
++ AVR32_SYNTAX_LD_UBLT,
++ AVR32_SYNTAX_LD_UBMI,
++ AVR32_SYNTAX_LD_UBPL,
++ AVR32_SYNTAX_LD_UBLS,
++ AVR32_SYNTAX_LD_UBGT,
++ AVR32_SYNTAX_LD_UBLE,
++ AVR32_SYNTAX_LD_UBHI,
++ AVR32_SYNTAX_LD_UBVS,
++ AVR32_SYNTAX_LD_UBVC,
++ AVR32_SYNTAX_LD_UBQS,
++ AVR32_SYNTAX_LD_UBAL,
++ AVR32_SYNTAX_LD_UBHS,
++ AVR32_SYNTAX_LD_UBLO,
++ AVR32_SYNTAX_ST_WEQ,
++ AVR32_SYNTAX_ST_WNE,
++ AVR32_SYNTAX_ST_WCC,
++ AVR32_SYNTAX_ST_WCS,
++ AVR32_SYNTAX_ST_WGE,
++ AVR32_SYNTAX_ST_WLT,
++ AVR32_SYNTAX_ST_WMI,
++ AVR32_SYNTAX_ST_WPL,
++ AVR32_SYNTAX_ST_WLS,
++ AVR32_SYNTAX_ST_WGT,
++ AVR32_SYNTAX_ST_WLE,
++ AVR32_SYNTAX_ST_WHI,
++ AVR32_SYNTAX_ST_WVS,
++ AVR32_SYNTAX_ST_WVC,
++ AVR32_SYNTAX_ST_WQS,
++ AVR32_SYNTAX_ST_WAL,
++ AVR32_SYNTAX_ST_WHS,
++ AVR32_SYNTAX_ST_WLO,
++ AVR32_SYNTAX_ST_HEQ,
++ AVR32_SYNTAX_ST_HNE,
++ AVR32_SYNTAX_ST_HCC,
++ AVR32_SYNTAX_ST_HCS,
++ AVR32_SYNTAX_ST_HGE,
++ AVR32_SYNTAX_ST_HLT,
++ AVR32_SYNTAX_ST_HMI,
++ AVR32_SYNTAX_ST_HPL,
++ AVR32_SYNTAX_ST_HLS,
++ AVR32_SYNTAX_ST_HGT,
++ AVR32_SYNTAX_ST_HLE,
++ AVR32_SYNTAX_ST_HHI,
++ AVR32_SYNTAX_ST_HVS,
++ AVR32_SYNTAX_ST_HVC,
++ AVR32_SYNTAX_ST_HQS,
++ AVR32_SYNTAX_ST_HAL,
++ AVR32_SYNTAX_ST_HHS,
++ AVR32_SYNTAX_ST_HLO,
++ AVR32_SYNTAX_ST_BEQ,
++ AVR32_SYNTAX_ST_BNE,
++ AVR32_SYNTAX_ST_BCC,
++ AVR32_SYNTAX_ST_BCS,
++ AVR32_SYNTAX_ST_BGE,
++ AVR32_SYNTAX_ST_BLT,
++ AVR32_SYNTAX_ST_BMI,
++ AVR32_SYNTAX_ST_BPL,
++ AVR32_SYNTAX_ST_BLS,
++ AVR32_SYNTAX_ST_BGT,
++ AVR32_SYNTAX_ST_BLE,
++ AVR32_SYNTAX_ST_BHI,
++ AVR32_SYNTAX_ST_BVS,
++ AVR32_SYNTAX_ST_BVC,
++ AVR32_SYNTAX_ST_BQS,
++ AVR32_SYNTAX_ST_BAL,
++ AVR32_SYNTAX_ST_BHS,
++ AVR32_SYNTAX_ST_BLO,
++ AVR32_SYNTAX_MOVH,
++ AVR32_SYNTAX__END_
++};
++#define AVR32_NR_SYNTAX AVR32_SYNTAX__END_
++
++enum avr32_alias_type
++ {
++ AVR32_ALIAS_PICOSVMAC0,
++ AVR32_ALIAS_PICOSVMAC1,
++ AVR32_ALIAS_PICOSVMAC2,
++ AVR32_ALIAS_PICOSVMAC3,
++ AVR32_ALIAS_PICOSVMUL0,
++ AVR32_ALIAS_PICOSVMUL1,
++ AVR32_ALIAS_PICOSVMUL2,
++ AVR32_ALIAS_PICOSVMUL3,
++ AVR32_ALIAS_PICOVMAC0,
++ AVR32_ALIAS_PICOVMAC1,
++ AVR32_ALIAS_PICOVMAC2,
++ AVR32_ALIAS_PICOVMAC3,
++ AVR32_ALIAS_PICOVMUL0,
++ AVR32_ALIAS_PICOVMUL1,
++ AVR32_ALIAS_PICOVMUL2,
++ AVR32_ALIAS_PICOVMUL3,
++ AVR32_ALIAS_PICOLD_D1,
++ AVR32_ALIAS_PICOLD_D2,
++ AVR32_ALIAS_PICOLD_D3,
++ AVR32_ALIAS_PICOLD_W1,
++ AVR32_ALIAS_PICOLD_W2,
++ AVR32_ALIAS_PICOLD_W3,
++ AVR32_ALIAS_PICOLDM_D,
++ AVR32_ALIAS_PICOLDM_D_PU,
++ AVR32_ALIAS_PICOLDM_W,
++ AVR32_ALIAS_PICOLDM_W_PU,
++ AVR32_ALIAS_PICOMV_D1,
++ AVR32_ALIAS_PICOMV_D2,
++ AVR32_ALIAS_PICOMV_W1,
++ AVR32_ALIAS_PICOMV_W2,
++ AVR32_ALIAS_PICOST_D1,
++ AVR32_ALIAS_PICOST_D2,
++ AVR32_ALIAS_PICOST_D3,
++ AVR32_ALIAS_PICOST_W1,
++ AVR32_ALIAS_PICOST_W2,
++ AVR32_ALIAS_PICOST_W3,
++ AVR32_ALIAS_PICOSTM_D,
++ AVR32_ALIAS_PICOSTM_D_PU,
++ AVR32_ALIAS_PICOSTM_W,
++ AVR32_ALIAS_PICOSTM_W_PU,
++ AVR32_ALIAS__END_
++ };
++#define AVR32_NR_ALIAS AVR32_ALIAS__END_
++
++enum avr32_mnemonic_type
++{
++ AVR32_MNEMONIC_ABS,
++ AVR32_MNEMONIC_ACALL,
++ AVR32_MNEMONIC_ACR,
++ AVR32_MNEMONIC_ADC,
++ AVR32_MNEMONIC_ADD,
++ AVR32_MNEMONIC_ADDABS,
++ AVR32_MNEMONIC_ADDHH_W,
++ AVR32_MNEMONIC_AND,
++ AVR32_MNEMONIC_ANDH,
++ AVR32_MNEMONIC_ANDL,
++ AVR32_MNEMONIC_ANDN,
++ AVR32_MNEMONIC_ASR,
++ AVR32_MNEMONIC_BFEXTS,
++ AVR32_MNEMONIC_BFEXTU,
++ AVR32_MNEMONIC_BFINS,
++ AVR32_MNEMONIC_BLD,
++ AVR32_MNEMONIC_BREQ,
++ AVR32_MNEMONIC_BRNE,
++ AVR32_MNEMONIC_BRCC,
++ AVR32_MNEMONIC_BRCS,
++ AVR32_MNEMONIC_BRGE,
++ AVR32_MNEMONIC_BRLT,
++ AVR32_MNEMONIC_BRMI,
++ AVR32_MNEMONIC_BRPL,
++ AVR32_MNEMONIC_BRHS,
++ AVR32_MNEMONIC_BRLO,
++ AVR32_MNEMONIC_BRLS,
++ AVR32_MNEMONIC_BRGT,
++ AVR32_MNEMONIC_BRLE,
++ AVR32_MNEMONIC_BRHI,
++ AVR32_MNEMONIC_BRVS,
++ AVR32_MNEMONIC_BRVC,
++ AVR32_MNEMONIC_BRQS,
++ AVR32_MNEMONIC_BRAL,
++ AVR32_MNEMONIC_BREAKPOINT,
++ AVR32_MNEMONIC_BREV,
++ AVR32_MNEMONIC_BST,
++ AVR32_MNEMONIC_CACHE,
++ AVR32_MNEMONIC_CASTS_B,
++ AVR32_MNEMONIC_CASTS_H,
++ AVR32_MNEMONIC_CASTU_B,
++ AVR32_MNEMONIC_CASTU_H,
++ AVR32_MNEMONIC_CBR,
++ AVR32_MNEMONIC_CLZ,
++ AVR32_MNEMONIC_COM,
++ AVR32_MNEMONIC_COP,
++ AVR32_MNEMONIC_CP_B,
++ AVR32_MNEMONIC_CP_H,
++ AVR32_MNEMONIC_CP_W,
++ AVR32_MNEMONIC_CP,
++ AVR32_MNEMONIC_CPC,
++ AVR32_MNEMONIC_CSRF,
++ AVR32_MNEMONIC_CSRFCZ,
++ AVR32_MNEMONIC_DIVS,
++ AVR32_MNEMONIC_DIVU,
++ AVR32_MNEMONIC_EOR,
++ AVR32_MNEMONIC_EORL,
++ AVR32_MNEMONIC_EORH,
++ AVR32_MNEMONIC_FRS,
++ AVR32_MNEMONIC_SSCALL,
++ AVR32_MNEMONIC_RETSS,
++ AVR32_MNEMONIC_ICALL,
++ AVR32_MNEMONIC_INCJOSP,
++ AVR32_MNEMONIC_LD_D,
++ AVR32_MNEMONIC_LD_SB,
++ AVR32_MNEMONIC_LD_UB,
++ AVR32_MNEMONIC_LD_SH,
++ AVR32_MNEMONIC_LD_UH,
++ AVR32_MNEMONIC_LD_W,
++ AVR32_MNEMONIC_LDC_D,
++ AVR32_MNEMONIC_LDC_W,
++ AVR32_MNEMONIC_LDC0_D,
++ AVR32_MNEMONIC_LDC0_W,
++ AVR32_MNEMONIC_LDCM_D,
++ AVR32_MNEMONIC_LDCM_W,
++ AVR32_MNEMONIC_LDDPC,
++ AVR32_MNEMONIC_LDDSP,
++ AVR32_MNEMONIC_LDINS_B,
++ AVR32_MNEMONIC_LDINS_H,
++ AVR32_MNEMONIC_LDM,
++ AVR32_MNEMONIC_LDMTS,
++ AVR32_MNEMONIC_LDSWP_SH,
++ AVR32_MNEMONIC_LDSWP_UH,
++ AVR32_MNEMONIC_LDSWP_W,
++ AVR32_MNEMONIC_LSL,
++ AVR32_MNEMONIC_LSR,
++ AVR32_MNEMONIC_MAC,
++ AVR32_MNEMONIC_MACHH_D,
++ AVR32_MNEMONIC_MACHH_W,
++ AVR32_MNEMONIC_MACS_D,
++ AVR32_MNEMONIC_MACSATHH_W,
++ AVR32_MNEMONIC_MACU_D,
++ AVR32_MNEMONIC_MACWH_D,
++ AVR32_MNEMONIC_MAX,
++ AVR32_MNEMONIC_MCALL,
++ AVR32_MNEMONIC_MFDR,
++ AVR32_MNEMONIC_MFSR,
++ AVR32_MNEMONIC_MIN,
++ AVR32_MNEMONIC_MOV,
++ AVR32_MNEMONIC_MOVEQ,
++ AVR32_MNEMONIC_MOVNE,
++ AVR32_MNEMONIC_MOVCC,
++ AVR32_MNEMONIC_MOVCS,
++ AVR32_MNEMONIC_MOVGE,
++ AVR32_MNEMONIC_MOVLT,
++ AVR32_MNEMONIC_MOVMI,
++ AVR32_MNEMONIC_MOVPL,
++ AVR32_MNEMONIC_MOVLS,
++ AVR32_MNEMONIC_MOVGT,
++ AVR32_MNEMONIC_MOVLE,
++ AVR32_MNEMONIC_MOVHI,
++ AVR32_MNEMONIC_MOVVS,
++ AVR32_MNEMONIC_MOVVC,
++ AVR32_MNEMONIC_MOVQS,
++ AVR32_MNEMONIC_MOVAL,
++ AVR32_MNEMONIC_MOVHS,
++ AVR32_MNEMONIC_MOVLO,
++ AVR32_MNEMONIC_MTDR,
++ AVR32_MNEMONIC_MTSR,
++ AVR32_MNEMONIC_MUL,
++ AVR32_MNEMONIC_MULHH_W,
++ AVR32_MNEMONIC_MULNHH_W,
++ AVR32_MNEMONIC_MULNWH_D,
++ AVR32_MNEMONIC_MULS_D,
++ AVR32_MNEMONIC_MULSATHH_H,
++ AVR32_MNEMONIC_MULSATHH_W,
++ AVR32_MNEMONIC_MULSATRNDHH_H,
++ AVR32_MNEMONIC_MULSATRNDWH_W,
++ AVR32_MNEMONIC_MULSATWH_W,
++ AVR32_MNEMONIC_MULU_D,
++ AVR32_MNEMONIC_MULWH_D,
++ AVR32_MNEMONIC_MUSFR,
++ AVR32_MNEMONIC_MUSTR,
++ AVR32_MNEMONIC_MVCR_D,
++ AVR32_MNEMONIC_MVCR_W,
++ AVR32_MNEMONIC_MVRC_D,
++ AVR32_MNEMONIC_MVRC_W,
++ AVR32_MNEMONIC_NEG,
++ AVR32_MNEMONIC_NOP,
++ AVR32_MNEMONIC_OR,
++ AVR32_MNEMONIC_ORH,
++ AVR32_MNEMONIC_ORL,
++ AVR32_MNEMONIC_PABS_SB,
++ AVR32_MNEMONIC_PABS_SH,
++ AVR32_MNEMONIC_PACKSH_SB,
++ AVR32_MNEMONIC_PACKSH_UB,
++ AVR32_MNEMONIC_PACKW_SH,
++ AVR32_MNEMONIC_PADD_B,
++ AVR32_MNEMONIC_PADD_H,
++ AVR32_MNEMONIC_PADDH_SH,
++ AVR32_MNEMONIC_PADDH_UB,
++ AVR32_MNEMONIC_PADDS_SB,
++ AVR32_MNEMONIC_PADDS_SH,
++ AVR32_MNEMONIC_PADDS_UB,
++ AVR32_MNEMONIC_PADDS_UH,
++ AVR32_MNEMONIC_PADDSUB_H,
++ AVR32_MNEMONIC_PADDSUBH_SH,
++ AVR32_MNEMONIC_PADDSUBS_SH,
++ AVR32_MNEMONIC_PADDSUBS_UH,
++ AVR32_MNEMONIC_PADDX_H,
++ AVR32_MNEMONIC_PADDXH_SH,
++ AVR32_MNEMONIC_PADDXS_SH,
++ AVR32_MNEMONIC_PADDXS_UH,
++ AVR32_MNEMONIC_PASR_B,
++ AVR32_MNEMONIC_PASR_H,
++ AVR32_MNEMONIC_PAVG_SH,
++ AVR32_MNEMONIC_PAVG_UB,
++ AVR32_MNEMONIC_PLSL_B,
++ AVR32_MNEMONIC_PLSL_H,
++ AVR32_MNEMONIC_PLSR_B,
++ AVR32_MNEMONIC_PLSR_H,
++ AVR32_MNEMONIC_PMAX_SH,
++ AVR32_MNEMONIC_PMAX_UB,
++ AVR32_MNEMONIC_PMIN_SH,
++ AVR32_MNEMONIC_PMIN_UB,
++ AVR32_MNEMONIC_POPJC,
++ AVR32_MNEMONIC_POPM,
++ AVR32_MNEMONIC_PREF,
++ AVR32_MNEMONIC_PSAD,
++ AVR32_MNEMONIC_PSUB_B,
++ AVR32_MNEMONIC_PSUB_H,
++ AVR32_MNEMONIC_PSUBADD_H,
++ AVR32_MNEMONIC_PSUBADDH_SH,
++ AVR32_MNEMONIC_PSUBADDS_SH,
++ AVR32_MNEMONIC_PSUBADDS_UH,
++ AVR32_MNEMONIC_PSUBH_SH,
++ AVR32_MNEMONIC_PSUBH_UB,
++ AVR32_MNEMONIC_PSUBS_SB,
++ AVR32_MNEMONIC_PSUBS_SH,
++ AVR32_MNEMONIC_PSUBS_UB,
++ AVR32_MNEMONIC_PSUBS_UH,
++ AVR32_MNEMONIC_PSUBX_H,
++ AVR32_MNEMONIC_PSUBXH_SH,
++ AVR32_MNEMONIC_PSUBXS_SH,
++ AVR32_MNEMONIC_PSUBXS_UH,
++ AVR32_MNEMONIC_PUNPCKSB_H,
++ AVR32_MNEMONIC_PUNPCKUB_H,
++ AVR32_MNEMONIC_PUSHJC,
++ AVR32_MNEMONIC_PUSHM,
++ AVR32_MNEMONIC_RCALL,
++ AVR32_MNEMONIC_RETEQ,
++ AVR32_MNEMONIC_RETNE,
++ AVR32_MNEMONIC_RETCC,
++ AVR32_MNEMONIC_RETCS,
++ AVR32_MNEMONIC_RETGE,
++ AVR32_MNEMONIC_RETLT,
++ AVR32_MNEMONIC_RETMI,
++ AVR32_MNEMONIC_RETPL,
++ AVR32_MNEMONIC_RETLS,
++ AVR32_MNEMONIC_RETGT,
++ AVR32_MNEMONIC_RETLE,
++ AVR32_MNEMONIC_RETHI,
++ AVR32_MNEMONIC_RETVS,
++ AVR32_MNEMONIC_RETVC,
++ AVR32_MNEMONIC_RETQS,
++ AVR32_MNEMONIC_RETAL,
++ AVR32_MNEMONIC_RETHS,
++ AVR32_MNEMONIC_RETLO,
++ AVR32_MNEMONIC_RET,
++ AVR32_MNEMONIC_RETD,
++ AVR32_MNEMONIC_RETE,
++ AVR32_MNEMONIC_RETJ,
++ AVR32_MNEMONIC_RETS,
++ AVR32_MNEMONIC_RJMP,
++ AVR32_MNEMONIC_ROL,
++ AVR32_MNEMONIC_ROR,
++ AVR32_MNEMONIC_RSUB,
++ AVR32_MNEMONIC_SATADD_H,
++ AVR32_MNEMONIC_SATADD_W,
++ AVR32_MNEMONIC_SATRNDS,
++ AVR32_MNEMONIC_SATRNDU,
++ AVR32_MNEMONIC_SATS,
++ AVR32_MNEMONIC_SATSUB_H,
++ AVR32_MNEMONIC_SATSUB_W,
++ AVR32_MNEMONIC_SATU,
++ AVR32_MNEMONIC_SBC,
++ AVR32_MNEMONIC_SBR,
++ AVR32_MNEMONIC_SCALL,
++ AVR32_MNEMONIC_SCR,
++ AVR32_MNEMONIC_SLEEP,
++ AVR32_MNEMONIC_SREQ,
++ AVR32_MNEMONIC_SRNE,
++ AVR32_MNEMONIC_SRCC,
++ AVR32_MNEMONIC_SRCS,
++ AVR32_MNEMONIC_SRGE,
++ AVR32_MNEMONIC_SRLT,
++ AVR32_MNEMONIC_SRMI,
++ AVR32_MNEMONIC_SRPL,
++ AVR32_MNEMONIC_SRLS,
++ AVR32_MNEMONIC_SRGT,
++ AVR32_MNEMONIC_SRLE,
++ AVR32_MNEMONIC_SRHI,
++ AVR32_MNEMONIC_SRVS,
++ AVR32_MNEMONIC_SRVC,
++ AVR32_MNEMONIC_SRQS,
++ AVR32_MNEMONIC_SRAL,
++ AVR32_MNEMONIC_SRHS,
++ AVR32_MNEMONIC_SRLO,
++ AVR32_MNEMONIC_SSRF,
++ AVR32_MNEMONIC_ST_B,
++ AVR32_MNEMONIC_ST_D,
++ AVR32_MNEMONIC_ST_H,
++ AVR32_MNEMONIC_ST_W,
++ AVR32_MNEMONIC_STC_D,
++ AVR32_MNEMONIC_STC_W,
++ AVR32_MNEMONIC_STC0_D,
++ AVR32_MNEMONIC_STC0_W,
++ AVR32_MNEMONIC_STCM_D,
++ AVR32_MNEMONIC_STCM_W,
++ AVR32_MNEMONIC_STCOND,
++ AVR32_MNEMONIC_STDSP,
++ AVR32_MNEMONIC_STHH_W,
++ AVR32_MNEMONIC_STM,
++ AVR32_MNEMONIC_STMTS,
++ AVR32_MNEMONIC_STSWP_H,
++ AVR32_MNEMONIC_STSWP_W,
++ AVR32_MNEMONIC_SUB,
++ AVR32_MNEMONIC_SUBEQ,
++ AVR32_MNEMONIC_SUBNE,
++ AVR32_MNEMONIC_SUBCC,
++ AVR32_MNEMONIC_SUBCS,
++ AVR32_MNEMONIC_SUBGE,
++ AVR32_MNEMONIC_SUBLT,
++ AVR32_MNEMONIC_SUBMI,
++ AVR32_MNEMONIC_SUBPL,
++ AVR32_MNEMONIC_SUBLS,
++ AVR32_MNEMONIC_SUBGT,
++ AVR32_MNEMONIC_SUBLE,
++ AVR32_MNEMONIC_SUBHI,
++ AVR32_MNEMONIC_SUBVS,
++ AVR32_MNEMONIC_SUBVC,
++ AVR32_MNEMONIC_SUBQS,
++ AVR32_MNEMONIC_SUBAL,
++ AVR32_MNEMONIC_SUBHS,
++ AVR32_MNEMONIC_SUBLO,
++ AVR32_MNEMONIC_SUBFEQ,
++ AVR32_MNEMONIC_SUBFNE,
++ AVR32_MNEMONIC_SUBFCC,
++ AVR32_MNEMONIC_SUBFCS,
++ AVR32_MNEMONIC_SUBFGE,
++ AVR32_MNEMONIC_SUBFLT,
++ AVR32_MNEMONIC_SUBFMI,
++ AVR32_MNEMONIC_SUBFPL,
++ AVR32_MNEMONIC_SUBFLS,
++ AVR32_MNEMONIC_SUBFGT,
++ AVR32_MNEMONIC_SUBFLE,
++ AVR32_MNEMONIC_SUBFHI,
++ AVR32_MNEMONIC_SUBFVS,
++ AVR32_MNEMONIC_SUBFVC,
++ AVR32_MNEMONIC_SUBFQS,
++ AVR32_MNEMONIC_SUBFAL,
++ AVR32_MNEMONIC_SUBFHS,
++ AVR32_MNEMONIC_SUBFLO,
++ AVR32_MNEMONIC_SUBHH_W,
++ AVR32_MNEMONIC_SWAP_B,
++ AVR32_MNEMONIC_SWAP_BH,
++ AVR32_MNEMONIC_SWAP_H,
++ AVR32_MNEMONIC_SYNC,
++ AVR32_MNEMONIC_TLBR,
++ AVR32_MNEMONIC_TLBS,
++ AVR32_MNEMONIC_TLBW,
++ AVR32_MNEMONIC_TNBZ,
++ AVR32_MNEMONIC_TST,
++ AVR32_MNEMONIC_XCHG,
++ AVR32_MNEMONIC_MEMC,
++ AVR32_MNEMONIC_MEMS,
++ AVR32_MNEMONIC_MEMT,
++ AVR32_MNEMONIC_FMAC_S,
++ AVR32_MNEMONIC_FNMAC_S,
++ AVR32_MNEMONIC_FMSC_S,
++ AVR32_MNEMONIC_FNMSC_S,
++ AVR32_MNEMONIC_FMUL_S,
++ AVR32_MNEMONIC_FNMUL_S,
++ AVR32_MNEMONIC_FADD_S,
++ AVR32_MNEMONIC_FSUB_S,
++ AVR32_MNEMONIC_FCASTRS_SW,
++ AVR32_MNEMONIC_FCASTRS_UW,
++ AVR32_MNEMONIC_FCASTSW_S,
++ AVR32_MNEMONIC_FCASTUW_S,
++ AVR32_MNEMONIC_FCMP_S,
++ AVR32_MNEMONIC_FCHK_S,
++ AVR32_MNEMONIC_FRCPA_S,
++ AVR32_MNEMONIC_FRSQRTA_S,
++ /* AVR32_MNEMONIC_FLD_S,
++ AVR32_MNEMONIC_FLD_D,
++ AVR32_MNEMONIC_FST_S,
++ AVR32_MNEMONIC_FST_D, */
++ AVR32_MNEMONIC_LDA_W,
++ AVR32_MNEMONIC_CALL,
++ AVR32_MNEMONIC_PICOSVMAC,
++ AVR32_MNEMONIC_PICOSVMUL,
++ AVR32_MNEMONIC_PICOVMAC,
++ AVR32_MNEMONIC_PICOVMUL,
++ AVR32_MNEMONIC_PICOLD_D,
++ AVR32_MNEMONIC_PICOLD_W,
++ AVR32_MNEMONIC_PICOLDM_D,
++ AVR32_MNEMONIC_PICOLDM_W,
++ AVR32_MNEMONIC_PICOMV_D,
++ AVR32_MNEMONIC_PICOMV_W,
++ AVR32_MNEMONIC_PICOST_D,
++ AVR32_MNEMONIC_PICOST_W,
++ AVR32_MNEMONIC_PICOSTM_D,
++ AVR32_MNEMONIC_PICOSTM_W,
++ AVR32_MNEMONIC_RSUBEQ,
++ AVR32_MNEMONIC_RSUBNE,
++ AVR32_MNEMONIC_RSUBCC,
++ AVR32_MNEMONIC_RSUBCS,
++ AVR32_MNEMONIC_RSUBGE,
++ AVR32_MNEMONIC_RSUBLT,
++ AVR32_MNEMONIC_RSUBMI,
++ AVR32_MNEMONIC_RSUBPL,
++ AVR32_MNEMONIC_RSUBLS,
++ AVR32_MNEMONIC_RSUBGT,
++ AVR32_MNEMONIC_RSUBLE,
++ AVR32_MNEMONIC_RSUBHI,
++ AVR32_MNEMONIC_RSUBVS,
++ AVR32_MNEMONIC_RSUBVC,
++ AVR32_MNEMONIC_RSUBQS,
++ AVR32_MNEMONIC_RSUBAL,
++ AVR32_MNEMONIC_RSUBHS,
++ AVR32_MNEMONIC_RSUBLO,
++ AVR32_MNEMONIC_ADDEQ,
++ AVR32_MNEMONIC_ADDNE,
++ AVR32_MNEMONIC_ADDCC,
++ AVR32_MNEMONIC_ADDCS,
++ AVR32_MNEMONIC_ADDGE,
++ AVR32_MNEMONIC_ADDLT,
++ AVR32_MNEMONIC_ADDMI,
++ AVR32_MNEMONIC_ADDPL,
++ AVR32_MNEMONIC_ADDLS,
++ AVR32_MNEMONIC_ADDGT,
++ AVR32_MNEMONIC_ADDLE,
++ AVR32_MNEMONIC_ADDHI,
++ AVR32_MNEMONIC_ADDVS,
++ AVR32_MNEMONIC_ADDVC,
++ AVR32_MNEMONIC_ADDQS,
++ AVR32_MNEMONIC_ADDAL,
++ AVR32_MNEMONIC_ADDHS,
++ AVR32_MNEMONIC_ADDLO,
++ AVR32_MNEMONIC_ANDEQ,
++ AVR32_MNEMONIC_ANDNE,
++ AVR32_MNEMONIC_ANDCC,
++ AVR32_MNEMONIC_ANDCS,
++ AVR32_MNEMONIC_ANDGE,
++ AVR32_MNEMONIC_ANDLT,
++ AVR32_MNEMONIC_ANDMI,
++ AVR32_MNEMONIC_ANDPL,
++ AVR32_MNEMONIC_ANDLS,
++ AVR32_MNEMONIC_ANDGT,
++ AVR32_MNEMONIC_ANDLE,
++ AVR32_MNEMONIC_ANDHI,
++ AVR32_MNEMONIC_ANDVS,
++ AVR32_MNEMONIC_ANDVC,
++ AVR32_MNEMONIC_ANDQS,
++ AVR32_MNEMONIC_ANDAL,
++ AVR32_MNEMONIC_ANDHS,
++ AVR32_MNEMONIC_ANDLO,
++ AVR32_MNEMONIC_OREQ,
++ AVR32_MNEMONIC_ORNE,
++ AVR32_MNEMONIC_ORCC,
++ AVR32_MNEMONIC_ORCS,
++ AVR32_MNEMONIC_ORGE,
++ AVR32_MNEMONIC_ORLT,
++ AVR32_MNEMONIC_ORMI,
++ AVR32_MNEMONIC_ORPL,
++ AVR32_MNEMONIC_ORLS,
++ AVR32_MNEMONIC_ORGT,
++ AVR32_MNEMONIC_ORLE,
++ AVR32_MNEMONIC_ORHI,
++ AVR32_MNEMONIC_ORVS,
++ AVR32_MNEMONIC_ORVC,
++ AVR32_MNEMONIC_ORQS,
++ AVR32_MNEMONIC_ORAL,
++ AVR32_MNEMONIC_ORHS,
++ AVR32_MNEMONIC_ORLO,
++ AVR32_MNEMONIC_EOREQ,
++ AVR32_MNEMONIC_EORNE,
++ AVR32_MNEMONIC_EORCC,
++ AVR32_MNEMONIC_EORCS,
++ AVR32_MNEMONIC_EORGE,
++ AVR32_MNEMONIC_EORLT,
++ AVR32_MNEMONIC_EORMI,
++ AVR32_MNEMONIC_EORPL,
++ AVR32_MNEMONIC_EORLS,
++ AVR32_MNEMONIC_EORGT,
++ AVR32_MNEMONIC_EORLE,
++ AVR32_MNEMONIC_EORHI,
++ AVR32_MNEMONIC_EORVS,
++ AVR32_MNEMONIC_EORVC,
++ AVR32_MNEMONIC_EORQS,
++ AVR32_MNEMONIC_EORAL,
++ AVR32_MNEMONIC_EORHS,
++ AVR32_MNEMONIC_EORLO,
++ AVR32_MNEMONIC_LD_WEQ,
++ AVR32_MNEMONIC_LD_WNE,
++ AVR32_MNEMONIC_LD_WCC,
++ AVR32_MNEMONIC_LD_WCS,
++ AVR32_MNEMONIC_LD_WGE,
++ AVR32_MNEMONIC_LD_WLT,
++ AVR32_MNEMONIC_LD_WMI,
++ AVR32_MNEMONIC_LD_WPL,
++ AVR32_MNEMONIC_LD_WLS,
++ AVR32_MNEMONIC_LD_WGT,
++ AVR32_MNEMONIC_LD_WLE,
++ AVR32_MNEMONIC_LD_WHI,
++ AVR32_MNEMONIC_LD_WVS,
++ AVR32_MNEMONIC_LD_WVC,
++ AVR32_MNEMONIC_LD_WQS,
++ AVR32_MNEMONIC_LD_WAL,
++ AVR32_MNEMONIC_LD_WHS,
++ AVR32_MNEMONIC_LD_WLO,
++ AVR32_MNEMONIC_LD_SHEQ,
++ AVR32_MNEMONIC_LD_SHNE,
++ AVR32_MNEMONIC_LD_SHCC,
++ AVR32_MNEMONIC_LD_SHCS,
++ AVR32_MNEMONIC_LD_SHGE,
++ AVR32_MNEMONIC_LD_SHLT,
++ AVR32_MNEMONIC_LD_SHMI,
++ AVR32_MNEMONIC_LD_SHPL,
++ AVR32_MNEMONIC_LD_SHLS,
++ AVR32_MNEMONIC_LD_SHGT,
++ AVR32_MNEMONIC_LD_SHLE,
++ AVR32_MNEMONIC_LD_SHHI,
++ AVR32_MNEMONIC_LD_SHVS,
++ AVR32_MNEMONIC_LD_SHVC,
++ AVR32_MNEMONIC_LD_SHQS,
++ AVR32_MNEMONIC_LD_SHAL,
++ AVR32_MNEMONIC_LD_SHHS,
++ AVR32_MNEMONIC_LD_SHLO,
++ AVR32_MNEMONIC_LD_UHEQ,
++ AVR32_MNEMONIC_LD_UHNE,
++ AVR32_MNEMONIC_LD_UHCC,
++ AVR32_MNEMONIC_LD_UHCS,
++ AVR32_MNEMONIC_LD_UHGE,
++ AVR32_MNEMONIC_LD_UHLT,
++ AVR32_MNEMONIC_LD_UHMI,
++ AVR32_MNEMONIC_LD_UHPL,
++ AVR32_MNEMONIC_LD_UHLS,
++ AVR32_MNEMONIC_LD_UHGT,
++ AVR32_MNEMONIC_LD_UHLE,
++ AVR32_MNEMONIC_LD_UHHI,
++ AVR32_MNEMONIC_LD_UHVS,
++ AVR32_MNEMONIC_LD_UHVC,
++ AVR32_MNEMONIC_LD_UHQS,
++ AVR32_MNEMONIC_LD_UHAL,
++ AVR32_MNEMONIC_LD_UHHS,
++ AVR32_MNEMONIC_LD_UHLO,
++ AVR32_MNEMONIC_LD_SBEQ,
++ AVR32_MNEMONIC_LD_SBNE,
++ AVR32_MNEMONIC_LD_SBCC,
++ AVR32_MNEMONIC_LD_SBCS,
++ AVR32_MNEMONIC_LD_SBGE,
++ AVR32_MNEMONIC_LD_SBLT,
++ AVR32_MNEMONIC_LD_SBMI,
++ AVR32_MNEMONIC_LD_SBPL,
++ AVR32_MNEMONIC_LD_SBLS,
++ AVR32_MNEMONIC_LD_SBGT,
++ AVR32_MNEMONIC_LD_SBLE,
++ AVR32_MNEMONIC_LD_SBHI,
++ AVR32_MNEMONIC_LD_SBVS,
++ AVR32_MNEMONIC_LD_SBVC,
++ AVR32_MNEMONIC_LD_SBQS,
++ AVR32_MNEMONIC_LD_SBAL,
++ AVR32_MNEMONIC_LD_SBHS,
++ AVR32_MNEMONIC_LD_SBLO,
++ AVR32_MNEMONIC_LD_UBEQ,
++ AVR32_MNEMONIC_LD_UBNE,
++ AVR32_MNEMONIC_LD_UBCC,
++ AVR32_MNEMONIC_LD_UBCS,
++ AVR32_MNEMONIC_LD_UBGE,
++ AVR32_MNEMONIC_LD_UBLT,
++ AVR32_MNEMONIC_LD_UBMI,
++ AVR32_MNEMONIC_LD_UBPL,
++ AVR32_MNEMONIC_LD_UBLS,
++ AVR32_MNEMONIC_LD_UBGT,
++ AVR32_MNEMONIC_LD_UBLE,
++ AVR32_MNEMONIC_LD_UBHI,
++ AVR32_MNEMONIC_LD_UBVS,
++ AVR32_MNEMONIC_LD_UBVC,
++ AVR32_MNEMONIC_LD_UBQS,
++ AVR32_MNEMONIC_LD_UBAL,
++ AVR32_MNEMONIC_LD_UBHS,
++ AVR32_MNEMONIC_LD_UBLO,
++ AVR32_MNEMONIC_ST_WEQ,
++ AVR32_MNEMONIC_ST_WNE,
++ AVR32_MNEMONIC_ST_WCC,
++ AVR32_MNEMONIC_ST_WCS,
++ AVR32_MNEMONIC_ST_WGE,
++ AVR32_MNEMONIC_ST_WLT,
++ AVR32_MNEMONIC_ST_WMI,
++ AVR32_MNEMONIC_ST_WPL,
++ AVR32_MNEMONIC_ST_WLS,
++ AVR32_MNEMONIC_ST_WGT,
++ AVR32_MNEMONIC_ST_WLE,
++ AVR32_MNEMONIC_ST_WHI,
++ AVR32_MNEMONIC_ST_WVS,
++ AVR32_MNEMONIC_ST_WVC,
++ AVR32_MNEMONIC_ST_WQS,
++ AVR32_MNEMONIC_ST_WAL,
++ AVR32_MNEMONIC_ST_WHS,
++ AVR32_MNEMONIC_ST_WLO,
++ AVR32_MNEMONIC_ST_HEQ,
++ AVR32_MNEMONIC_ST_HNE,
++ AVR32_MNEMONIC_ST_HCC,
++ AVR32_MNEMONIC_ST_HCS,
++ AVR32_MNEMONIC_ST_HGE,
++ AVR32_MNEMONIC_ST_HLT,
++ AVR32_MNEMONIC_ST_HMI,
++ AVR32_MNEMONIC_ST_HPL,
++ AVR32_MNEMONIC_ST_HLS,
++ AVR32_MNEMONIC_ST_HGT,
++ AVR32_MNEMONIC_ST_HLE,
++ AVR32_MNEMONIC_ST_HHI,
++ AVR32_MNEMONIC_ST_HVS,
++ AVR32_MNEMONIC_ST_HVC,
++ AVR32_MNEMONIC_ST_HQS,
++ AVR32_MNEMONIC_ST_HAL,
++ AVR32_MNEMONIC_ST_HHS,
++ AVR32_MNEMONIC_ST_HLO,
++ AVR32_MNEMONIC_ST_BEQ,
++ AVR32_MNEMONIC_ST_BNE,
++ AVR32_MNEMONIC_ST_BCC,
++ AVR32_MNEMONIC_ST_BCS,
++ AVR32_MNEMONIC_ST_BGE,
++ AVR32_MNEMONIC_ST_BLT,
++ AVR32_MNEMONIC_ST_BMI,
++ AVR32_MNEMONIC_ST_BPL,
++ AVR32_MNEMONIC_ST_BLS,
++ AVR32_MNEMONIC_ST_BGT,
++ AVR32_MNEMONIC_ST_BLE,
++ AVR32_MNEMONIC_ST_BHI,
++ AVR32_MNEMONIC_ST_BVS,
++ AVR32_MNEMONIC_ST_BVC,
++ AVR32_MNEMONIC_ST_BQS,
++ AVR32_MNEMONIC_ST_BAL,
++ AVR32_MNEMONIC_ST_BHS,
++ AVR32_MNEMONIC_ST_BLO,
++ AVR32_MNEMONIC_MOVH,
++ AVR32_MNEMONIC__END_
++};
++#define AVR32_NR_MNEMONICS AVR32_MNEMONIC__END_
++
++enum avr32_syntax_parser
++ {
++ AVR32_PARSER_NORMAL,
++ AVR32_PARSER_ALIAS,
++ AVR32_PARSER_LDA,
++ AVR32_PARSER_CALL,
++ AVR32_PARSER__END_
++ };
++#define AVR32_NR_PARSERS AVR32_PARSER__END_
+--- a/opcodes/configure.in
++++ b/opcodes/configure.in
+@@ -223,6 +223,7 @@ if test x${all_targets} = xfalse ; then
+ bfd_arc_arch) ta="$ta arc-dis.lo arc-opc.lo arc-ext.lo" ;;
+ bfd_arm_arch) ta="$ta arm-dis.lo" ;;
+ bfd_avr_arch) ta="$ta avr-dis.lo" ;;
++ bfd_avr32_arch) ta="$ta avr32-asm.lo avr32-dis.lo avr32-opc.lo" ;;
+ bfd_bfin_arch) ta="$ta bfin-dis.lo" ;;
+ bfd_cr16_arch) ta="$ta cr16-dis.lo cr16-opc.lo" ;;
+ bfd_cris_arch) ta="$ta cris-dis.lo cris-opc.lo cgen-bitset.lo" ;;
+@@ -285,7 +286,7 @@ if test x${all_targets} = xfalse ; then
+ ta="$ta sh64-dis.lo sh64-opc.lo"
+ archdefs="$archdefs -DINCLUDE_SHMEDIA"
+ break;;
+- esac;
++ esac
+ done
+ ta="$ta sh-dis.lo cgen-bitset.lo" ;;
+ bfd_sparc_arch) ta="$ta sparc-dis.lo sparc-opc.lo" ;;
+--- a/opcodes/disassemble.c
++++ b/opcodes/disassemble.c
+@@ -27,6 +27,7 @@
+ #define ARCH_arc
+ #define ARCH_arm
+ #define ARCH_avr
++#define ARCH_avr32
+ #define ARCH_bfin
+ #define ARCH_cr16
+ #define ARCH_cris
+@@ -131,6 +132,11 @@ disassembler (abfd)
+ disassemble = print_insn_avr;
+ break;
+ #endif
++#ifdef ARCH_avr32
++ case bfd_arch_avr32:
++ disassemble = print_insn_avr32;
++ break;
++#endif
+ #ifdef ARCH_bfin
+ case bfd_arch_bfin:
+ disassemble = print_insn_bfin;
+@@ -485,6 +491,9 @@ disassembler_usage (stream)
+ #ifdef ARCH_i386
+ print_i386_disassembler_options (stream);
+ #endif
++#ifdef ARCH_avr32
++ print_avr32_disassembler_options (stream);
++#endif
+ #ifdef ARCH_s390
+ print_s390_disassembler_options (stream);
+ #endif
+--- a/bfd/configure
++++ b/bfd/configure
+@@ -14787,6 +14787,7 @@ do
+ bfd_pei_ia64_vec) tb="$tb pei-ia64.lo pepigen.lo cofflink.lo"; target_size=64 ;;
+ bfd_elf32_am33lin_vec) tb="$tb elf32-am33lin.lo elf32.lo $elf" ;;
+ bfd_elf32_avr_vec) tb="$tb elf32-avr.lo elf32.lo $elf" ;;
++ bfd_elf32_avr32_vec) tb="$tb elf32-avr32.lo elf32.lo $elf" ;;
+ bfd_elf32_bfin_vec) tb="$tb elf32-bfin.lo elf32.lo $elf" ;;
+ bfd_elf32_bfinfdpic_vec) tb="$tb elf32-bfin.lo elf32.lo $elf" ;;
+ bfd_elf32_big_generic_vec) tb="$tb elf32-gen.lo elf32.lo $elf" ;;
+--- a/opcodes/configure
++++ b/opcodes/configure
+@@ -12284,6 +12284,7 @@ if test x${all_targets} = xfalse ; then
+ bfd_arc_arch) ta="$ta arc-dis.lo arc-opc.lo arc-ext.lo" ;;
+ bfd_arm_arch) ta="$ta arm-dis.lo" ;;
+ bfd_avr_arch) ta="$ta avr-dis.lo" ;;
++ bfd_avr32_arch) ta="$ta avr32-asm.lo avr32-dis.lo avr32-opc.lo" ;;
+ bfd_bfin_arch) ta="$ta bfin-dis.lo" ;;
+ bfd_cr16_arch) ta="$ta cr16-dis.lo cr16-opc.lo" ;;
+ bfd_cris_arch) ta="$ta cris-dis.lo cris-opc.lo cgen-bitset.lo" ;;
+--- a/bfd/libbfd.h
++++ b/bfd/libbfd.h
+@@ -1646,6 +1646,48 @@ static const char *const bfd_reloc_code_
+ "BFD_RELOC_AVR_LDI",
+ "BFD_RELOC_AVR_6",
+ "BFD_RELOC_AVR_6_ADIW",
++ "BFD_RELOC_AVR32_DIFF32",
++ "BFD_RELOC_AVR32_DIFF16",
++ "BFD_RELOC_AVR32_DIFF8",
++ "BFD_RELOC_AVR32_GOT32",
++ "BFD_RELOC_AVR32_GOT16",
++ "BFD_RELOC_AVR32_GOT8",
++ "BFD_RELOC_AVR32_21S",
++ "BFD_RELOC_AVR32_16U",
++ "BFD_RELOC_AVR32_16S",
++ "BFD_RELOC_AVR32_SUB5",
++ "BFD_RELOC_AVR32_8S_EXT",
++ "BFD_RELOC_AVR32_8S",
++ "BFD_RELOC_AVR32_15S",
++ "BFD_RELOC_AVR32_22H_PCREL",
++ "BFD_RELOC_AVR32_18W_PCREL",
++ "BFD_RELOC_AVR32_16B_PCREL",
++ "BFD_RELOC_AVR32_16N_PCREL",
++ "BFD_RELOC_AVR32_14UW_PCREL",
++ "BFD_RELOC_AVR32_11H_PCREL",
++ "BFD_RELOC_AVR32_10UW_PCREL",
++ "BFD_RELOC_AVR32_9H_PCREL",
++ "BFD_RELOC_AVR32_9UW_PCREL",
++ "BFD_RELOC_AVR32_GOTPC",
++ "BFD_RELOC_AVR32_GOTCALL",
++ "BFD_RELOC_AVR32_LDA_GOT",
++ "BFD_RELOC_AVR32_GOT21S",
++ "BFD_RELOC_AVR32_GOT18SW",
++ "BFD_RELOC_AVR32_GOT16S",
++ "BFD_RELOC_AVR32_32_CPENT",
++ "BFD_RELOC_AVR32_CPCALL",
++ "BFD_RELOC_AVR32_16_CP",
++ "BFD_RELOC_AVR32_9W_CP",
++ "BFD_RELOC_AVR32_ALIGN",
++ "BFD_RELOC_AVR32_14UW",
++ "BFD_RELOC_AVR32_10UW",
++ "BFD_RELOC_AVR32_10SW",
++ "BFD_RELOC_AVR32_STHH_W",
++ "BFD_RELOC_AVR32_7UW",
++ "BFD_RELOC_AVR32_6S",
++ "BFD_RELOC_AVR32_6UW",
++ "BFD_RELOC_AVR32_4UH",
++ "BFD_RELOC_AVR32_3U",
+ "BFD_RELOC_390_12",
+ "BFD_RELOC_390_GOT12",
+ "BFD_RELOC_390_PLT32",
+--- a/ld/Makefile.in
++++ b/ld/Makefile.in
+@@ -434,6 +434,53 @@ ALL_EMULATIONS = \
+ eavr5.o \
+ eavr51.o \
+ eavr6.o \
++ eavr32elf_ap7000.o \
++ eavr32elf_ap7001.o \
++ eavr32elf_ap7002.o \
++ eavr32elf_ap7200.o \
++ eavr32elf_uc3a0128.o \
++ eavr32elf_uc3a0256.o \
++ eavr32elf_uc3a0512.o \
++ eavr32elf_uc3a0512es.o \
++ eavr32elf_uc3a1128.o \
++ eavr32elf_uc3a1256.o \
++ eavr32elf_uc3a1512es.o \
++ eavr32elf_uc3a1512.o \
++ eavr32elf_uc3a364.o \
++ eavr32elf_uc3a364s.o \
++ eavr32elf_uc3a3128.o \
++ eavr32elf_uc3a3128s.o \
++ eavr32elf_uc3a3256.o \
++ eavr32elf_uc3a3256s.o \
++ eavr32elf_uc3b064.o \
++ eavr32elf_uc3b0128.o \
++ eavr32elf_uc3b0256es.o \
++ eavr32elf_uc3b0256.o \
++ eavr32elf_uc3b0512.o \
++ eavr32elf_uc3b0512revc.o \
++ eavr32elf_uc3b164.o \
++ eavr32elf_uc3b1128.o \
++ eavr32elf_uc3b1256es.o \
++ eavr32elf_uc3b1256.o \
++ eavr32elf_uc3b1512.o \
++ eavr32elf_uc3b1512revc.o \
++ eavr32elf_uc3c064c.o \
++ eavr32elf_uc3c0128c.o \
++ eavr32elf_uc3c0256c.o \
++ eavr32elf_uc3c0512crevc.o \
++ eavr32elf_uc3c164c.o \
++ eavr32elf_uc3c1128c.o \
++ eavr32elf_uc3c1256c.o \
++ eavr32elf_uc3c1512crevc.o \
++ eavr32elf_uc3c264c.o \
++ eavr32elf_uc3c2128c.o \
++ eavr32elf_uc3c2256c.o \
++ eavr32elf_uc3c2512crevc.o \
++ eavr32elf_uc3l064.o \
++ eavr32elf_uc3l032.o \
++ eavr32elf_uc3l016.o \
++ eavr32elf_uc3l064revb.o \
++ eavr32linux.o \
+ ecoff_i860.o \
+ ecoff_sparc.o \
+ eelf32_spu.o \
+@@ -2069,6 +2116,194 @@ eavr6.c: $(srcdir)/emulparams/avr6.sh $(
+ $(ELF_DEPS) $(srcdir)/scripttempl/avr.sc \
+ ${GEN_DEPENDS}
+ ${GENSCRIPTS} avr6 "$(tdir_avr2)"
++eavr32elf_ap7000.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7000 "$(tdir_avr32)" avr32elf
++eavr32elf_ap7001.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7001 "$(tdir_avr32)" avr32elf
++eavr32elf_ap7002.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7002 "$(tdir_avr32)" avr32elf
++eavr32elf_ap7200.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_ap7200 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a0512es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a0512es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a1512es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a1512es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a364.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a364 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a364s.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a364s "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3128s.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3128s "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3a3256s.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3a3256s "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b064.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b064 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0256es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0256es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b0512revc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b0512revc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b164.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b164 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1128.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1128 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1256.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1256 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1256es.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1256es "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1512.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1512 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3b1512revc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3b1512revc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c064c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c064c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0128c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0128c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0256c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0256c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c0512crevc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c0512crevc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c164c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c164c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1128c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1128c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1256c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1256c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c1512crevc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c1512crevc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c264c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c264c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2128c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2128c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2256c.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2256c "$(tdir_avr32)" avr32elf
++eavr32elf_uc3c2512crevc.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3c2512crevc "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l064.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l064 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l032.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l032 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l016.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l016 "$(tdir_avr32)" avr32elf
++eavr32elf_uc3l064revb.c: $(srcdir)/emulparams/avr32elf.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/avr32.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32elf_uc3l064revb "$(tdir_avr32)" avr32elf
++eavr32linux.c: $(srcdir)/emulparams/avr32linux.sh \
++ $(srcdir)/emultempl/elf32.em $(srcdir)/emultempl/avr32elf.em \
++ $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
++ ${GENSCRIPTS} avr32linux "$(tdir_avr32)"
+ ecoff_i860.c: $(srcdir)/emulparams/coff_i860.sh \
+ $(srcdir)/emultempl/generic.em $(srcdir)/scripttempl/i860coff.sc ${GEN_DEPENDS}
+ ${GENSCRIPTS} coff_i860 "$(tdir_coff_i860)"
+--- a/gas/Makefile.in
++++ b/gas/Makefile.in
+@@ -309,6 +309,7 @@ CPU_TYPES = \
+ arc \
+ arm \
+ avr \
++ avr32 \
+ bfin \
+ cr16 \
+ cris \
+@@ -508,6 +509,7 @@ TARGET_CPU_CFILES = \
+ config/tc-arc.c \
+ config/tc-arm.c \
+ config/tc-avr.c \
++ config/tc-avr32.c \
+ config/tc-bfin.c \
+ config/tc-cr16.c \
+ config/tc-cris.c \
+@@ -571,6 +573,7 @@ TARGET_CPU_HFILES = \
+ config/tc-arc.h \
+ config/tc-arm.h \
+ config/tc-avr.h \
++ config/tc-avr32.h \
+ config/tc-bfin.h \
+ config/tc-cr16.h \
+ config/tc-cris.h \
+@@ -949,6 +952,7 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-arc.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-arm.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-avr.Po@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-avr32.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-bfin.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-cr16.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-cris.Po@am__quote@
+@@ -1086,6 +1090,20 @@ tc-avr.obj: config/tc-avr.c
+ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-avr.obj `if test -f 'config/tc-avr.c'; then $(CYGPATH_W) 'config/tc-avr.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-avr.c'; fi`
+
++tc-avr32.o: config/tc-avr32.c
++@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-avr32.o -MD -MP -MF $(DEPDIR)/tc-avr32.Tpo -c -o tc-avr32.o `test -f 'config/tc-avr32.c' || echo '$(srcdir)/'`config/tc-avr32.c
++@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-avr32.Tpo $(DEPDIR)/tc-avr32.Po
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='config/tc-avr32.c' object='tc-avr32.o' libtool=no @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-avr32.o `test -f 'config/tc-avr32.c' || echo '$(srcdir)/'`config/tc-avr32.c
++
++tc-avr32.obj: config/tc-avr32.c
++@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-avr32.obj -MD -MP -MF $(DEPDIR)/tc-avr32.Tpo -c -o tc-avr32.obj `if test -f 'config/tc-avr32.c'; then $(CYGPATH_W) 'config/tc-avr32.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-avr32.c'; fi`
++@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-avr32.Tpo $(DEPDIR)/tc-avr32.Po
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='config/tc-avr32.c' object='tc-avr32.obj' libtool=no @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-avr32.obj `if test -f 'config/tc-avr32.c'; then $(CYGPATH_W) 'config/tc-avr32.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-avr32.c'; fi`
++
+ tc-bfin.o: config/tc-bfin.c
+ @am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-bfin.o -MD -MP -MF $(DEPDIR)/tc-bfin.Tpo -c -o tc-bfin.o `test -f 'config/tc-bfin.c' || echo '$(srcdir)/'`config/tc-bfin.c
+ @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-bfin.Tpo $(DEPDIR)/tc-bfin.Po
+--- a/bfd/bfd-in2.h
++++ b/bfd/bfd-in2.h
+@@ -2042,6 +2042,12 @@ enum bfd_architecture
+ #define bfd_mach_avr5 5
+ #define bfd_mach_avr51 51
+ #define bfd_mach_avr6 6
++ bfd_arch_avr32, /* Atmel AVR32 */
++#define bfd_mach_avr32_ap 7000
++#define bfd_mach_avr32_uc 3000
++#define bfd_mach_avr32_ucr1 3001
++#define bfd_mach_avr32_ucr2 3002
++#define bfd_mach_avr32_ucr3 3003
+ bfd_arch_bfin, /* ADI Blackfin */
+ #define bfd_mach_bfin 1
+ bfd_arch_cr16, /* National Semiconductor CompactRISC (ie CR16). */
+@@ -3851,6 +3857,88 @@ instructions */
+ instructions */
+ BFD_RELOC_AVR_6_ADIW,
+
++/* Difference between two labels: L2 - L1. The value of L1 is encoded
++as sym + addend, while the initial difference after assembly is
++inserted into the object file by the assembler. */
++ BFD_RELOC_AVR32_DIFF32,
++ BFD_RELOC_AVR32_DIFF16,
++ BFD_RELOC_AVR32_DIFF8,
++
++/* Reference to a symbol through the Global Offset Table. The linker
++will allocate an entry for symbol in the GOT and insert the offset
++of this entry as the relocation value. */
++ BFD_RELOC_AVR32_GOT32,
++ BFD_RELOC_AVR32_GOT16,
++ BFD_RELOC_AVR32_GOT8,
++
++/* Normal (non-pc-relative) code relocations. Alignment and signedness
++is indicated by the suffixes. S means signed, U means unsigned. W
++means word-aligned, H means halfword-aligned, neither means
++byte-aligned (no alignment.) SUB5 is the same relocation as 16S. */
++ BFD_RELOC_AVR32_21S,
++ BFD_RELOC_AVR32_16U,
++ BFD_RELOC_AVR32_16S,
++ BFD_RELOC_AVR32_SUB5,
++ BFD_RELOC_AVR32_8S_EXT,
++ BFD_RELOC_AVR32_8S,
++ BFD_RELOC_AVR32_15S,
++
++/* PC-relative relocations are signed if neither 'U' nor 'S' is
++specified. However, we explicitly tack on a 'B' to indicate no
++alignment, to avoid confusion with data relocs. All of these resolve
++to sym + addend - offset, except the one with 'N' (negated) suffix.
++This particular one resolves to offset - sym - addend. */
++ BFD_RELOC_AVR32_22H_PCREL,
++ BFD_RELOC_AVR32_18W_PCREL,
++ BFD_RELOC_AVR32_16B_PCREL,
++ BFD_RELOC_AVR32_16N_PCREL,
++ BFD_RELOC_AVR32_14UW_PCREL,
++ BFD_RELOC_AVR32_11H_PCREL,
++ BFD_RELOC_AVR32_10UW_PCREL,
++ BFD_RELOC_AVR32_9H_PCREL,
++ BFD_RELOC_AVR32_9UW_PCREL,
++
++/* Subtract the link-time address of the GOT from (symbol + addend)
++and insert the result. */
++ BFD_RELOC_AVR32_GOTPC,
++
++/* Reference to a symbol through the GOT. The linker will allocate an
++entry for symbol in the GOT and insert the offset of this entry as
++the relocation value. addend must be zero. As usual, 'S' means
++signed, 'W' means word-aligned, etc. */
++ BFD_RELOC_AVR32_GOTCALL,
++ BFD_RELOC_AVR32_LDA_GOT,
++ BFD_RELOC_AVR32_GOT21S,
++ BFD_RELOC_AVR32_GOT18SW,
++ BFD_RELOC_AVR32_GOT16S,
++
++/* 32-bit constant pool entry. I don't think 8- and 16-bit entries make
++a whole lot of sense. */
++ BFD_RELOC_AVR32_32_CPENT,
++
++/* Constant pool references. Some of these relocations are signed,
++others are unsigned. It doesn't really matter, since the constant
++pool always comes after the code that references it. */
++ BFD_RELOC_AVR32_CPCALL,
++ BFD_RELOC_AVR32_16_CP,
++ BFD_RELOC_AVR32_9W_CP,
++
++/* sym must be the absolute symbol. The addend specifies the alignment
++order, e.g. if addend is 2, the linker must add padding so that the
++next address is aligned to a 4-byte boundary. */
++ BFD_RELOC_AVR32_ALIGN,
++
++/* Code relocations that will never make it to the output file. */
++ BFD_RELOC_AVR32_14UW,
++ BFD_RELOC_AVR32_10UW,
++ BFD_RELOC_AVR32_10SW,
++ BFD_RELOC_AVR32_STHH_W,
++ BFD_RELOC_AVR32_7UW,
++ BFD_RELOC_AVR32_6S,
++ BFD_RELOC_AVR32_6UW,
++ BFD_RELOC_AVR32_4UH,
++ BFD_RELOC_AVR32_3U,
++
+ /* Direct 12 bit. */
+ BFD_RELOC_390_12,
+
diff --git a/toolchain/gcc/Makefile b/toolchain/gcc/Makefile
index 17ade4d04..68f64e2ff 100644
--- a/toolchain/gcc/Makefile
+++ b/toolchain/gcc/Makefile
@@ -44,7 +44,7 @@ else
GCC_CONFOPTS+= --disable-tls --disable-threads --disable-libatomic
endif
-ifneq ($(ADK_LINUX_AARCH64)$(ADK_LINUX_ALPHA)$(ADK_LINUX_ARC)$(ADK_LINUX_BFIN)$(ADK_LINUX_XTENSA)$(ADK_LINUX_M68K),y)
+ifneq ($(ADK_LINUX_AARCH64)$(ADK_LINUX_ALPHA)$(ADK_LINUX_ARC)$(ADK_LINUX_AVR32)$(ADK_LINUX_BFIN)$(ADK_LINUX_XTENSA)$(ADK_LINUX_M68K),y)
GCC_FINAL_CONFOPTS+= --enable-cxx-flags='$(TARGET_CXXFLAGS)'
endif
diff --git a/toolchain/gcc/Makefile.inc b/toolchain/gcc/Makefile.inc
index 5acfced94..7849939cf 100644
--- a/toolchain/gcc/Makefile.inc
+++ b/toolchain/gcc/Makefile.inc
@@ -33,3 +33,11 @@ PKG_RELEASE:= 1
DISTFILES:= ${PKG_NAME}-${PKG_VERSION}.tar.gz
LIBSTDCXXVER:= 19
endif
+ifeq ($(ADK_TOOLCHAIN_GCC_4_4_7),y)
+PKG_VERSION:= 4.4.7
+PKG_MD5SUM:= e2c60f5ef918be2db08df96c7d97d0c4
+PKG_SITES:= ${MASTER_SITE_GNU:=gcc/gcc-${PKG_VERSION}/}
+PKG_RELEASE:= 1
+DISTFILES:= ${PKG_NAME}-${PKG_VERSION}.tar.gz
+LIBSTDCXXVER:= 19
+endif
diff --git a/toolchain/gcc/patches/4.4.7/930-avr32_support.patch b/toolchain/gcc/patches/4.4.7/930-avr32_support.patch
new file mode 100644
index 000000000..334d2cd13
--- /dev/null
+++ b/toolchain/gcc/patches/4.4.7/930-avr32_support.patch
@@ -0,0 +1,22706 @@
+--- a/gcc/builtins.c
++++ b/gcc/builtins.c
+@@ -11108,7 +11108,7 @@ validate_gimple_arglist (const_gimple ca
+
+ do
+ {
+- code = va_arg (ap, enum tree_code);
++ code = va_arg (ap, int);
+ switch (code)
+ {
+ case 0:
+--- a/gcc/calls.c
++++ b/gcc/calls.c
+@@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
+ for (; count < nargs; count++)
+ {
+ rtx val = va_arg (p, rtx);
+- enum machine_mode mode = va_arg (p, enum machine_mode);
++ enum machine_mode mode = va_arg (p, int);
+
+ /* We cannot convert the arg value to the mode the library wants here;
+ must do it earlier where we know the signedness of the arg. */
+--- /dev/null
++++ b/gcc/config/avr32/avr32.c
+@@ -0,0 +1,8060 @@
++/*
++ Target hooks and helper functions for AVR32.
++ Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
++
++ This file is part of GCC.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "tree.h"
++#include "obstack.h"
++#include "regs.h"
++#include "hard-reg-set.h"
++#include "real.h"
++#include "insn-config.h"
++#include "conditions.h"
++#include "output.h"
++#include "insn-attr.h"
++#include "flags.h"
++#include "reload.h"
++#include "function.h"
++#include "expr.h"
++#include "optabs.h"
++#include "toplev.h"
++#include "recog.h"
++#include "ggc.h"
++#include "except.h"
++#include "c-pragma.h"
++#include "integrate.h"
++#include "tm_p.h"
++#include "langhooks.h"
++#include "hooks.h"
++#include "df.h"
++
++#include "target.h"
++#include "target-def.h"
++
++#include <ctype.h>
++
++
++
++/* Global variables. */
++typedef struct minipool_node Mnode;
++typedef struct minipool_fixup Mfix;
++
++/* Obstack for minipool constant handling. */
++static struct obstack minipool_obstack;
++static char *minipool_startobj;
++static rtx minipool_vector_label;
++
++/* True if we are currently building a constant table. */
++int making_const_table;
++
++tree fndecl_attribute_args = NULL_TREE;
++
++
++/* Function prototypes. */
++static unsigned long avr32_isr_value (tree);
++static unsigned long avr32_compute_func_type (void);
++static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
++static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
++static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
++ int flags, bool * no_add_attrs);
++static void avr32_reorg (void);
++bool avr32_return_in_msb (tree type);
++bool avr32_vector_mode_supported (enum machine_mode mode);
++static void avr32_init_libfuncs (void);
++static void avr32_file_end (void);
++static void flashvault_decl_list_add (unsigned int vector_num, const char *name);
++
++
++
++static void
++avr32_add_gc_roots (void)
++{
++ gcc_obstack_init (&minipool_obstack);
++ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
++}
++
++
++/* List of all known AVR32 parts */
++static const struct part_type_s avr32_part_types[] = {
++ /* name, part_type, architecture type, macro */
++ {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
++ {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
++ {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
++ {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
++ {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
++ {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0128__"},
++ {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0256__"},
++ {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0512__"},
++ {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A0512ES__"},
++ {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1128__"},
++ {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1256__"},
++ {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1512__"},
++ {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A1512ES__"},
++ {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, "__AVR32_UC3A3256S__"},
++ {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364__"},
++ {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364S__"},
++ {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128__"},
++ {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128S__"},
++ {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256__"},
++ {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256S__"},
++ {"uc3a464", PART_TYPE_AVR32_UC3A464, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464__"},
++ {"uc3a464s", PART_TYPE_AVR32_UC3A464S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464S__"},
++ {"uc3a4128", PART_TYPE_AVR32_UC3A4128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128__"},
++ {"uc3a4128s", PART_TYPE_AVR32_UC3A4128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128S__"},
++ {"uc3a4256", PART_TYPE_AVR32_UC3A4256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256__"},
++ {"uc3a4256s", PART_TYPE_AVR32_UC3A4256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256S__"},
++ {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B064__"},
++ {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0128__"},
++ {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256__"},
++ {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256ES__"},
++ {"uc3b0512", PART_TYPE_AVR32_UC3B0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512__"},
++ {"uc3b0512revc", PART_TYPE_AVR32_UC3B0512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512REVC__"},
++ {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B164__"},
++ {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1128__"},
++ {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256__"},
++ {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256ES__"},
++ {"uc3b1512", PART_TYPE_AVR32_UC3B1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512__"},
++ {"uc3b1512revc", PART_TYPE_AVR32_UC3B1512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512REVC__"},
++ {"uc64d3", PART_TYPE_AVR32_UC64D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D3__"},
++ {"uc128d3", PART_TYPE_AVR32_UC128D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D3__"},
++ {"uc64d4", PART_TYPE_AVR32_UC64D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D4__"},
++ {"uc128d4", PART_TYPE_AVR32_UC128D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D4__"},
++ {"uc3c0512crevc", PART_TYPE_AVR32_UC3C0512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0512CREVC__"},
++ {"uc3c1512crevc", PART_TYPE_AVR32_UC3C1512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1512CREVC__"},
++ {"uc3c2512crevc", PART_TYPE_AVR32_UC3C2512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2512CREVC__"},
++ {"uc3l0256", PART_TYPE_AVR32_UC3L0256, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0256__"},
++ {"uc3l0128", PART_TYPE_AVR32_UC3L0128, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0128__"},
++ {"uc3l064", PART_TYPE_AVR32_UC3L064, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064__"},
++ {"uc3l032", PART_TYPE_AVR32_UC3L032, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L032__"},
++ {"uc3l016", PART_TYPE_AVR32_UC3L016, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L016__"},
++ {"uc3l064revb", PART_TYPE_AVR32_UC3L064REVB, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064REVB__"},
++ {"uc64l3u", PART_TYPE_AVR32_UC64L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L3U__"},
++ {"uc128l3u", PART_TYPE_AVR32_UC128L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L3U__"},
++ {"uc256l3u", PART_TYPE_AVR32_UC256L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L3U__"},
++ {"uc64l4u", PART_TYPE_AVR32_UC64L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L4U__"},
++ {"uc128l4u", PART_TYPE_AVR32_UC128L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L4U__"},
++ {"uc256l4u", PART_TYPE_AVR32_UC256L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L4U__"},
++ {"uc3c064c", PART_TYPE_AVR32_UC3C064C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C064C__"},
++ {"uc3c0128c", PART_TYPE_AVR32_UC3C0128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0128C__"},
++ {"uc3c0256c", PART_TYPE_AVR32_UC3C0256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0256C__"},
++ {"uc3c0512c", PART_TYPE_AVR32_UC3C0512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0512C__"},
++ {"uc3c164c", PART_TYPE_AVR32_UC3C164C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C164C__"},
++ {"uc3c1128c", PART_TYPE_AVR32_UC3C1128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1128C__"},
++ {"uc3c1256c", PART_TYPE_AVR32_UC3C1256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1256C__"},
++ {"uc3c1512c", PART_TYPE_AVR32_UC3C1512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1512C__"},
++ {"uc3c264c", PART_TYPE_AVR32_UC3C264C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C264C__"},
++ {"uc3c2128c", PART_TYPE_AVR32_UC3C2128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2128C__"},
++ {"uc3c2256c", PART_TYPE_AVR32_UC3C2256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2256C__"},
++ {"uc3c2512c", PART_TYPE_AVR32_UC3C2512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2512C__"},
++ {"mxt768e", PART_TYPE_AVR32_MXT768E, ARCH_TYPE_AVR32_UCR3, "__AVR32_MXT768E__"},
++ {NULL, 0, 0, NULL}
++};
++
++/* List of all known AVR32 architectures */
++static const struct arch_type_s avr32_arch_types[] = {
++ /* name, architecture type, microarchitecture type, feature flags, macro */
++ {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
++ (FLAG_AVR32_HAS_DSP
++ | FLAG_AVR32_HAS_SIMD
++ | FLAG_AVR32_HAS_UNALIGNED_WORD
++ | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
++ | FLAG_AVR32_HAS_CACHES),
++ "__AVR32_AP__"},
++ {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
++ "__AVR32_UC__=1"},
++ {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS),
++ "__AVR32_UC__=2"},
++ {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
++ "__AVR32_UC__=2"},
++ {"ucr3", ARCH_TYPE_AVR32_UCR3, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS),
++ "__AVR32_UC__=3"},
++ {"ucr3fp", ARCH_TYPE_AVR32_UCR3FP, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW | FLAG_AVR32_HAS_FPU
++ | FLAG_AVR32_HAS_V2_INSNS),
++ "__AVR32_UC__=3"},
++ {NULL, 0, 0, 0, NULL}
++};
++
++/* Default arch name */
++const char *avr32_arch_name = "none";
++const char *avr32_part_name = "none";
++
++const struct part_type_s *avr32_part;
++const struct arch_type_s *avr32_arch;
++
++
++/* FIXME: needs to use GC. */
++struct flashvault_decl_list
++{
++ struct flashvault_decl_list *next;
++ unsigned int vector_num;
++ const char *name;
++};
++
++static struct flashvault_decl_list *flashvault_decl_list_head = NULL;
++
++
++/* Set default target_flags. */
++#undef TARGET_DEFAULT_TARGET_FLAGS
++#define TARGET_DEFAULT_TARGET_FLAGS \
++ (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
++
++void
++avr32_optimization_options (int level, int size)
++{
++ if (AVR32_ALWAYS_PIC)
++ flag_pic = 1;
++
++ /* Enable section anchors if optimization is enabled. */
++ if (level > 0 || size)
++ flag_section_anchors = 2;
++}
++
++
++/* Override command line options */
++void
++avr32_override_options (void)
++{
++ const struct part_type_s *part;
++ const struct arch_type_s *arch;
++
++ /* Add backward compatibility. */
++ if (strcmp ("uc", avr32_arch_name)== 0)
++ {
++ fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
++ "Please use '-march=ucr1' instead. "
++ "Converting to arch 'ucr1'\n",
++ avr32_arch_name);
++ avr32_arch_name="ucr1";
++ }
++
++ /* Check if arch type is set. */
++ for (arch = avr32_arch_types; arch->name; arch++)
++ {
++ if (strcmp (arch->name, avr32_arch_name) == 0)
++ break;
++ }
++ avr32_arch = arch;
++
++ if (!arch->name && strcmp("none", avr32_arch_name) != 0)
++ {
++ fprintf (stderr, "Unknown arch `%s' specified\n"
++ "Known arch names:\n"
++ "\tuc (deprecated)\n",
++ avr32_arch_name);
++ for (arch = avr32_arch_types; arch->name; arch++)
++ fprintf (stderr, "\t%s\n", arch->name);
++ avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
++ }
++
++ /* Check if part type is set. */
++ for (part = avr32_part_types; part->name; part++)
++ if (strcmp (part->name, avr32_part_name) == 0)
++ break;
++
++ avr32_part = part;
++ if (!part->name)
++ {
++ fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
++ avr32_part_name);
++ for (part = avr32_part_types; part->name; part++)
++ {
++ if (strcmp("none", part->name) != 0)
++ fprintf (stderr, "\t%s\n", part->name);
++ }
++ /* Set default to NONE*/
++ avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
++ }
++
++ /* NB! option -march= overrides option -mpart
++ * if both are used at the same time */
++ if (!arch->name)
++ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
++
++ /* If optimization level is two or greater, then align start of loops to a
++ word boundary since this will allow folding the first insn of the loop.
++ Do this only for targets supporting branch prediction. */
++ if (optimize >= 2 && TARGET_BRANCH_PRED)
++ align_loops = 2;
++
++
++ /* Enable fast-float library if unsafe math optimizations
++ are used. */
++ if (flag_unsafe_math_optimizations)
++ target_flags |= MASK_FAST_FLOAT;
++
++ /* Check if we should set avr32_imm_in_const_pool
++ based on if caches are present or not. */
++ if ( avr32_imm_in_const_pool == -1 )
++ {
++ if ( TARGET_CACHES )
++ avr32_imm_in_const_pool = 1;
++ else
++ avr32_imm_in_const_pool = 0;
++ }
++
++ if (TARGET_NO_PIC)
++ flag_pic = 0;
++ avr32_add_gc_roots ();
++}
++
++
++/*
++If defined, a function that outputs the assembler code for entry to a
++function. The prologue is responsible for setting up the stack frame,
++initializing the frame pointer register, saving registers that must be
++saved, and allocating size additional bytes of storage for the
++local variables. size is an integer. file is a stdio
++stream to which the assembler code should be output.
++
++The label for the beginning of the function need not be output by this
++macro. That has already been done when the macro is run.
++
++To determine which registers to save, the macro can refer to the array
++regs_ever_live: element r is nonzero if hard register
++r is used anywhere within the function. This implies the function
++prologue should save register r, provided it is not one of the
++call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
++regs_ever_live.)
++
++On machines that have ``register windows'', the function entry code does
++not save on the stack the registers that are in the windows, even if
++they are supposed to be preserved by function calls; instead it takes
++appropriate steps to ``push'' the register stack, if any non-call-used
++registers are used in the function.
++
++On machines where functions may or may not have frame-pointers, the
++function entry code must vary accordingly; it must set up the frame
++pointer if one is wanted, and not otherwise. To determine whether a
++frame pointer is wanted, the macro can refer to the variable
++frame_pointer_needed. The variable's value will be 1 at run
++time in a function that needs a frame pointer. (see Elimination).
++
++The function entry code is responsible for allocating any stack space
++required for the function. This stack space consists of the regions
++listed below. In most cases, these regions are allocated in the
++order listed, with the last listed region closest to the top of the
++stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
++the highest address if it is not defined). You can use a different order
++for a machine if doing so is more convenient or required for
++compatibility reasons. Except in cases where required by standard
++or by a debugger, there is no reason why the stack layout used by GCC
++need agree with that used by other compilers for a machine.
++*/
++
++#undef TARGET_ASM_FUNCTION_PROLOGUE
++#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
++
++#undef TARGET_ASM_FILE_END
++#define TARGET_ASM_FILE_END avr32_file_end
++
++#undef TARGET_DEFAULT_SHORT_ENUMS
++#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
++
++#undef TARGET_PROMOTE_FUNCTION_ARGS
++#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
++
++#undef TARGET_PROMOTE_FUNCTION_RETURN
++#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
++
++#undef TARGET_PROMOTE_PROTOTYPES
++#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
++
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
++
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
++
++#undef TARGET_STRICT_ARGUMENT_NAMING
++#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
++
++#undef TARGET_VECTOR_MODE_SUPPORTED_P
++#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
++
++#undef TARGET_RETURN_IN_MEMORY
++#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
++
++#undef TARGET_RETURN_IN_MSB
++#define TARGET_RETURN_IN_MSB avr32_return_in_msb
++
++#undef TARGET_ENCODE_SECTION_INFO
++#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
++
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
++
++#undef TARGET_STRIP_NAME_ENCODING
++#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
++
++#define streq(string1, string2) (strcmp (string1, string2) == 0)
++
++#undef TARGET_NARROW_VOLATILE_BITFIELD
++#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
++
++#undef TARGET_ATTRIBUTE_TABLE
++#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
++
++#undef TARGET_COMP_TYPE_ATTRIBUTES
++#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
++
++
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS avr32_rtx_costs
++
++#undef TARGET_CANNOT_FORCE_CONST_MEM
++#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
++
++#undef TARGET_ASM_INTEGER
++#define TARGET_ASM_INTEGER avr32_assemble_integer
++
++#undef TARGET_FUNCTION_VALUE
++#define TARGET_FUNCTION_VALUE avr32_function_value
++
++#undef TARGET_MIN_ANCHOR_OFFSET
++#define TARGET_MIN_ANCHOR_OFFSET (0)
++
++#undef TARGET_MAX_ANCHOR_OFFSET
++#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
++#undef TARGET_SECONDARY_RELOAD
++#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
++
++
++/*
++ * Defining the option, -mlist-devices to list the devices supported by gcc.
++ * This option should be used while printing target-help to list all the
++ * supported devices.
++ */
++#undef TARGET_HELP
++#define TARGET_HELP avr32_target_help
++
++void avr32_target_help ()
++{
++ if (avr32_list_supported_parts)
++ {
++ const struct part_type_s *list;
++ fprintf (stdout, "List of parts supported by avr32-gcc:\n");
++ for (list = avr32_part_types; list->name; list++)
++ {
++ if (strcmp("none", list->name) != 0)
++ fprintf (stdout, "%-20s%s\n", list->name, list->macro);
++ }
++ fprintf (stdout, "\n\n");
++ }
++}
++
++enum reg_class
++avr32_secondary_reload (bool in_p, rtx x, enum reg_class class,
++ enum machine_mode mode, secondary_reload_info *sri)
++{
++
++ if ( avr32_rmw_memory_operand (x, mode) )
++ {
++ if (!in_p)
++ sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
++ else
++ sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
++ }
++ return NO_REGS;
++
++}
++/*
++ * Switches to the appropriate section for output of constant pool
++ * entry x in mode. You can assume that x is some kind of constant in
++ * RTL. The argument mode is redundant except in the case of a
++ * const_int rtx. Select the section by calling readonly_data_ section
++ * or one of the alternatives for other sections. align is the
++ * constant alignment in bits.
++ *
++ * The default version of this function takes care of putting symbolic
++ * constants in flag_pic mode in data_section and everything else in
++ * readonly_data_section.
++ */
++//#undef TARGET_ASM_SELECT_RTX_SECTION
++//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
++
++
++/*
++ * If non-null, this hook performs a target-specific pass over the
++ * instruction stream. The compiler will run it at all optimization
++ * levels, just before the point at which it normally does
++ * delayed-branch scheduling.
++ *
++ * The exact purpose of the hook varies from target to target. Some
++ * use it to do transformations that are necessary for correctness,
++ * such as laying out in-function constant pools or avoiding hardware
++ * hazards. Others use it as an opportunity to do some
++ * machine-dependent optimizations.
++ *
++ * You need not implement the hook if it has nothing to do. The
++ * default definition is null.
++ */
++#undef TARGET_MACHINE_DEPENDENT_REORG
++#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
++
++/* Target hook for assembling integer objects.
++ Need to handle integer vectors */
++static bool
++avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
++{
++ if (avr32_vector_mode_supported (GET_MODE (x)))
++ {
++ int i, units;
++
++ if (GET_CODE (x) != CONST_VECTOR)
++ abort ();
++
++ units = CONST_VECTOR_NUNITS (x);
++
++ switch (GET_MODE (x))
++ {
++ case V2HImode:
++ size = 2;
++ break;
++ case V4QImode:
++ size = 1;
++ break;
++ default:
++ abort ();
++ }
++
++ for (i = 0; i < units; i++)
++ {
++ rtx elt;
++
++ elt = CONST_VECTOR_ELT (x, i);
++ assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
++ }
++
++ return true;
++ }
++
++ return default_assemble_integer (x, size, aligned_p);
++}
++
++
++/*
++ * This target hook describes the relative costs of RTL expressions.
++ *
++ * The cost may depend on the precise form of the expression, which is
++ * available for examination in x, and the rtx code of the expression
++ * in which it is contained, found in outer_code. code is the
++ * expression code--redundant, since it can be obtained with GET_CODE
++ * (x).
++ *
++ * In implementing this hook, you can use the construct COSTS_N_INSNS
++ * (n) to specify a cost equal to n fast instructions.
++ *
++ * On entry to the hook, *total contains a default estimate for the
++ * cost of the expression. The hook should modify this value as
++ * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
++ * for multiplications, COSTS_N_INSNS (7) for division and modulus
++ * operations, and COSTS_N_INSNS (1) for all other operations.
++ *
++ * When optimizing for code size, i.e. when optimize_size is non-zero,
++ * this target hook should be used to estimate the relative size cost
++ * of an expression, again relative to COSTS_N_INSNS.
++ *
++ * The hook returns true when all subexpressions of x have been
++ * processed, and false when rtx_cost should recurse.
++ */
++
++/* Worker routine for avr32_rtx_costs. */
++static inline int
++avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
++ enum rtx_code outer ATTRIBUTE_UNUSED)
++{
++ enum machine_mode mode = GET_MODE (x);
++
++ switch (GET_CODE (x))
++ {
++ case MEM:
++ /* Using pre decrement / post increment memory operations on the
++ avr32_uc architecture means that two writebacks must be performed
++ and hence two cycles are needed. */
++ if (!optimize_size
++ && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
++ && TARGET_ARCH_UC
++ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
++ || GET_CODE (XEXP (x, 0)) == POST_INC))
++ return COSTS_N_INSNS (5);
++
++ /* Memory costs quite a lot for the first word, but subsequent words
++ load at the equivalent of a single insn each. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
++
++ return COSTS_N_INSNS (4);
++ case SYMBOL_REF:
++ case CONST:
++ /* These are valid for the pseudo insns: lda.w and call which operates
++ on direct addresses. We assume that the cost of a lda.w is the same
++ as the cost of a ld.w insn. */
++ return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
++ case DIV:
++ case MOD:
++ case UDIV:
++ case UMOD:
++ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
++
++ case ROTATE:
++ case ROTATERT:
++ if (mode == TImode)
++ return COSTS_N_INSNS (100);
++
++ if (mode == DImode)
++ return COSTS_N_INSNS (10);
++ return COSTS_N_INSNS (4);
++ case ASHIFT:
++ case LSHIFTRT:
++ case ASHIFTRT:
++ case NOT:
++ if (mode == TImode)
++ return COSTS_N_INSNS (10);
++
++ if (mode == DImode)
++ return COSTS_N_INSNS (4);
++ return COSTS_N_INSNS (1);
++ case PLUS:
++ case MINUS:
++ case NEG:
++ case COMPARE:
++ case ABS:
++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
++ return COSTS_N_INSNS (100);
++
++ if (mode == TImode)
++ return COSTS_N_INSNS (50);
++
++ if (mode == DImode)
++ return COSTS_N_INSNS (2);
++ return COSTS_N_INSNS (1);
++
++ case MULT:
++ {
++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
++ return COSTS_N_INSNS (300);
++
++ if (mode == TImode)
++ return COSTS_N_INSNS (16);
++
++ if (mode == DImode)
++ return COSTS_N_INSNS (4);
++
++ if (mode == HImode)
++ return COSTS_N_INSNS (2);
++
++ return COSTS_N_INSNS (3);
++ }
++ case IF_THEN_ELSE:
++ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
++ return COSTS_N_INSNS (4);
++ return COSTS_N_INSNS (1);
++ case SIGN_EXTEND:
++ case ZERO_EXTEND:
++ /* Sign/Zero extensions of registers cost quite much since these
++      instrcutions only take one register operand which means that gcc
++      often must insert some move instructions */
++ if (mode == QImode || mode == HImode)
++ return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
++ return COSTS_N_INSNS (4);
++ case UNSPEC:
++ /* divmod operations */
++ if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
++ || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
++ {
++ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
++ }
++ /* Fallthrough */
++ default:
++ return COSTS_N_INSNS (1);
++ }
++}
++
++
++static bool
++avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
++{
++ *total = avr32_rtx_costs_1 (x, code, outer_code);
++ return true;
++}
++
++
++bool
++avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
++{
++ /* Do not want symbols in the constant pool when compiling pic or if using
++ address pseudo instructions. */
++ return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
++ && avr32_find_symbol (x) != NULL_RTX);
++}
++
++
++/* Table of machine attributes. */
++const struct attribute_spec avr32_attribute_table[] = {
++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
++ /* Interrupt Service Routines have special prologue and epilogue
++ requirements. */
++ {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
++ {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
++ {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
++ {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
++ {"rmw_addressable", 0, 0, true, false, false, NULL},
++ {"flashvault", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
++ {"flashvault_impl", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
++ {NULL, 0, 0, false, false, false, NULL}
++};
++
++
++typedef struct
++{
++ const char *const arg;
++ const unsigned long return_value;
++}
++isr_attribute_arg;
++
++
++static const isr_attribute_arg isr_attribute_args[] = {
++ {"FULL", AVR32_FT_ISR_FULL},
++ {"full", AVR32_FT_ISR_FULL},
++ {"HALF", AVR32_FT_ISR_HALF},
++ {"half", AVR32_FT_ISR_HALF},
++ {"NONE", AVR32_FT_ISR_NONE},
++ {"none", AVR32_FT_ISR_NONE},
++ {"UNDEF", AVR32_FT_ISR_NONE},
++ {"undef", AVR32_FT_ISR_NONE},
++ {"SWI", AVR32_FT_ISR_NONE},
++ {"swi", AVR32_FT_ISR_NONE},
++ {NULL, AVR32_FT_ISR_NONE}
++};
++
++
++/* Returns the (interrupt) function type of the current
++ function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
++static unsigned long
++avr32_isr_value (tree argument)
++{
++ const isr_attribute_arg *ptr;
++ const char *arg;
++
++ /* No argument - default to ISR_NONE. */
++ if (argument == NULL_TREE)
++ return AVR32_FT_ISR_NONE;
++
++ /* Get the value of the argument. */
++ if (TREE_VALUE (argument) == NULL_TREE
++ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
++ return AVR32_FT_UNKNOWN;
++
++ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
++
++ /* Check it against the list of known arguments. */
++ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
++ if (streq (arg, ptr->arg))
++ return ptr->return_value;
++
++ /* An unrecognized interrupt type. */
++ return AVR32_FT_UNKNOWN;
++}
++
++
++/*
++These hooks specify assembly directives for creating certain kinds
++of integer object. The TARGET_ASM_BYTE_OP directive creates a
++byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
++aligned two-byte object, and so on. Any of the hooks may be
++NULL, indicating that no suitable directive is available.
++
++The compiler will print these strings at the start of a new line,
++followed immediately by the object's initial value. In most cases,
++the string should contain a tab, a pseudo-op, and then another tab.
++*/
++#undef TARGET_ASM_BYTE_OP
++#define TARGET_ASM_BYTE_OP "\t.byte\t"
++#undef TARGET_ASM_ALIGNED_HI_OP
++#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
++#undef TARGET_ASM_ALIGNED_SI_OP
++#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
++#undef TARGET_ASM_ALIGNED_DI_OP
++#define TARGET_ASM_ALIGNED_DI_OP NULL
++#undef TARGET_ASM_ALIGNED_TI_OP
++#define TARGET_ASM_ALIGNED_TI_OP NULL
++#undef TARGET_ASM_UNALIGNED_HI_OP
++#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
++#undef TARGET_ASM_UNALIGNED_SI_OP
++#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
++#undef TARGET_ASM_UNALIGNED_DI_OP
++#define TARGET_ASM_UNALIGNED_DI_OP NULL
++#undef TARGET_ASM_UNALIGNED_TI_OP
++#define TARGET_ASM_UNALIGNED_TI_OP NULL
++
++#undef TARGET_ASM_OUTPUT_MI_THUNK
++#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
++
++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
++
++
++static void
++avr32_output_mi_thunk (FILE * file,
++ tree thunk ATTRIBUTE_UNUSED,
++ HOST_WIDE_INT delta,
++ HOST_WIDE_INT vcall_offset, tree function)
++ {
++ int mi_delta = delta;
++ int this_regno =
++ (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
++ INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
++
++
++ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
++ || vcall_offset)
++ {
++ fputs ("\tpushm\tlr\n", file);
++ }
++
++
++ if (mi_delta != 0)
++ {
++ if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
++ {
++ fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
++ }
++ else
++ {
++ /* Immediate is larger than k21 we must make us a temp register by
++ pushing a register to the stack. */
++ fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
++ fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
++ fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
++ }
++ }
++
++
++ if (vcall_offset != 0)
++ {
++ fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
++ fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
++ fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
++ }
++
++
++ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
++ || vcall_offset)
++ {
++ fputs ("\tpopm\tlr\n", file);
++ }
++
++ /* Jump to the function. We assume that we can use an rjmp since the
++ function to jump to is local and probably not too far away from
++ the thunk. If this assumption proves to be wrong we could implement
++ this jump by calculating the offset between the jump source and destination
++ and put this in the constant pool and then perform an add to pc.
++ This would also be legitimate PIC code. But for now we hope that an rjmp
++ will be sufficient...
++ */
++ fputs ("\trjmp\t", file);
++ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
++ fputc ('\n', file);
++ }
++
++
++/* Implements target hook vector_mode_supported. */
++bool
++avr32_vector_mode_supported (enum machine_mode mode)
++{
++ if ((mode == V2HImode) || (mode == V4QImode))
++ return true;
++
++ return false;
++}
++
++
++#undef TARGET_INIT_LIBFUNCS
++#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
++
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS avr32_init_builtins
++
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
++
++tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
++ void_ftype_ptr_int;
++tree void_ftype_int, void_ftype_ulong, void_ftype_void, int_ftype_ptr_int;
++tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
++ short_ftype_short_short;
++tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
++tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
++tree longlong_ftype_int_int, void_ftype_int_int_longlong;
++tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
++tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
++
++#define def_builtin(NAME, TYPE, CODE) \
++ add_builtin_function ((NAME), (TYPE), (CODE), \
++ BUILT_IN_MD, NULL, NULL_TREE)
++
++#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
++ do \
++ { \
++ if ((MASK)) \
++ add_builtin_function ((NAME), (TYPE), (CODE), \
++ BUILT_IN_MD, NULL, NULL_TREE); \
++ } \
++ while (0)
++
++struct builtin_description
++{
++ const unsigned int mask;
++ const enum insn_code icode;
++ const char *const name;
++ const int code;
++ const enum rtx_code comparison;
++ const unsigned int flag;
++ const tree *ftype;
++};
++
++static const struct builtin_description bdesc_2arg[] = {
++
++#define DSP_BUILTIN(code, builtin, ftype) \
++ { 1, CODE_FOR_##code, "__builtin_" #code , \
++ AVR32_BUILTIN_##builtin, 0, 0, ftype }
++
++ DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
++ DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
++ DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
++ DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
++ DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
++ DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
++ DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
++ DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
++ DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
++ DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
++ DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
++};
++
++
++void
++avr32_init_builtins (void)
++{
++ unsigned int i;
++ const struct builtin_description *d;
++ tree endlink = void_list_node;
++ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
++ tree longlong_endlink =
++ tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
++ tree short_endlink =
++ tree_cons (NULL_TREE, short_integer_type_node, endlink);
++ tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
++
++ /* int func (int) */
++ int_ftype_int = build_function_type (integer_type_node, int_endlink);
++
++ /* short func (short) */
++ short_ftype_short
++ = build_function_type (short_integer_type_node, short_endlink);
++
++ /* short func (short, short) */
++ short_ftype_short_short
++ = build_function_type (short_integer_type_node,
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
++
++ /* long long func (long long, short, short) */
++ longlong_ftype_longlong_short_short
++ = build_function_type (long_long_integer_type_node,
++ tree_cons (NULL_TREE, long_long_integer_type_node,
++ tree_cons (NULL_TREE,
++ short_integer_type_node,
++ short_endlink)));
++
++ /* long long func (short, short) */
++ longlong_ftype_short_short
++ = build_function_type (long_long_integer_type_node,
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
++
++ /* int func (int, int) */
++ int_ftype_int_int
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
++
++ /* long long func (int, int) */
++ longlong_ftype_int_int
++ = build_function_type (long_long_integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
++
++ /* long long int func (long long, int, short) */
++ longlong_ftype_longlong_int_short
++ = build_function_type (long_long_integer_type_node,
++ tree_cons (NULL_TREE, long_long_integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink)));
++
++ /* long long int func (int, short) */
++ longlong_ftype_int_short
++ = build_function_type (long_long_integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink));
++
++ /* int func (int, short, short) */
++ int_ftype_int_short_short
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE,
++ short_integer_type_node,
++ short_endlink)));
++
++ /* int func (short, short) */
++ int_ftype_short_short
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
++
++ /* int func (int, short) */
++ int_ftype_int_short
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink));
++
++ /* void func (int, int) */
++ void_ftype_int_int
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
++
++ /* void func (int, int, int) */
++ void_ftype_int_int_int
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink)));
++
++ /* void func (int, int, long long) */
++ void_ftype_int_int_longlong
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ longlong_endlink)));
++
++ /* void func (int, int, int, int, int) */
++ void_ftype_int_int_int_int_int
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE,
++ integer_type_node,
++ tree_cons
++ (NULL_TREE,
++ integer_type_node,
++ int_endlink)))));
++
++ /* void func (void *, int) */
++ void_ftype_ptr_int
++ = build_function_type (void_type_node,
++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
++
++ /* void func (int) */
++ void_ftype_int = build_function_type (void_type_node, int_endlink);
++
++ /* void func (ulong) */
++ void_ftype_ulong = build_function_type_list (void_type_node,
++ long_unsigned_type_node, NULL_TREE);
++
++ /* void func (void) */
++ void_ftype_void = build_function_type (void_type_node, void_endlink);
++
++ /* int func (void) */
++ int_ftype_void = build_function_type (integer_type_node, void_endlink);
++
++ /* int func (void *, int) */
++ int_ftype_ptr_int
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
++
++ /* int func (int, int, int) */
++ int_ftype_int_int_int
++ = build_function_type (integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink)));
++
++ /* Initialize avr32 builtins. */
++ def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
++ def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
++ def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
++ def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
++ def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
++ def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
++ def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
++ def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
++ def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
++ def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
++ def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
++ def_builtin ("__builtin_breakpoint", void_ftype_void,
++ AVR32_BUILTIN_BREAKPOINT);
++ def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
++ def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
++ def_builtin ("__builtin_bswap_16", short_ftype_short,
++ AVR32_BUILTIN_BSWAP16);
++ def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
++ def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
++ AVR32_BUILTIN_COP);
++ def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
++ def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
++ AVR32_BUILTIN_MVRC_W);
++ def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
++ AVR32_BUILTIN_MVCR_D);
++ def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
++ AVR32_BUILTIN_MVRC_D);
++ def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
++ def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
++ def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
++ AVR32_BUILTIN_SATRNDS);
++ def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
++ AVR32_BUILTIN_SATRNDU);
++ def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
++ def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
++ def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
++ AVR32_BUILTIN_MACSATHH_W);
++ def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
++ AVR32_BUILTIN_MACWH_D);
++ def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
++ AVR32_BUILTIN_MACHH_D);
++ def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
++ def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
++ def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
++ def_builtin ("__builtin_sleep", void_ftype_int, AVR32_BUILTIN_SLEEP);
++ def_builtin ("__builtin_avr32_delay_cycles", void_ftype_int, AVR32_BUILTIN_DELAY_CYCLES);
++
++ /* Add all builtins that are more or less simple operations on two
++ operands. */
++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ {
++ /* Use one of the operands; the target can have a different mode for
++ mask-generating compares. */
++
++ if (d->name == 0)
++ continue;
++
++ def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
++ }
++}
++
++
++/* Subroutine of avr32_expand_builtin to take care of binop insns. */
++static rtx
++avr32_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
++{
++ rtx pat;
++ tree arg0 = CALL_EXPR_ARG (exp,0);
++ tree arg1 = CALL_EXPR_ARG (exp,1);
++ rtx op0 = expand_normal (arg0);
++ rtx op1 = expand_normal (arg1);
++ enum machine_mode tmode = insn_data[icode].operand[0].mode;
++ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
++ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
++
++ if (!target
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ /* In case the insn wants input operands in modes different from the
++ result, abort. */
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ /* If op0 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op0))
++ op0 = convert_to_mode (mode0, op0, 1);
++ else
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op1))
++ op1 = convert_to_mode (mode1, op1, 1);
++ else
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
++
++
++/* Expand an expression EXP that calls a built-in function,
++ with result going to TARGET if that's convenient
++ (and in mode MODE if that's convenient).
++ SUBTARGET may be used as the target for computing one of EXP's operands.
++ IGNORE is nonzero if the value is to be ignored. */
++rtx
++avr32_expand_builtin (tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ const struct builtin_description *d;
++ unsigned int i;
++ enum insn_code icode = 0;
++ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
++ tree arg0, arg1, arg2;
++ rtx op0, op1, op2, pat;
++ enum machine_mode tmode, mode0, mode1;
++ enum machine_mode arg0_mode;
++ int fcode = DECL_FUNCTION_CODE (fndecl);
++
++ switch (fcode)
++ {
++ default:
++ break;
++
++ case AVR32_BUILTIN_SATS:
++ case AVR32_BUILTIN_SATU:
++ case AVR32_BUILTIN_SATRNDS:
++ case AVR32_BUILTIN_SATRNDU:
++ {
++ const char *fname;
++ switch (fcode)
++ {
++ default:
++ case AVR32_BUILTIN_SATS:
++ icode = CODE_FOR_sats;
++ fname = "sats";
++ break;
++ case AVR32_BUILTIN_SATU:
++ icode = CODE_FOR_satu;
++ fname = "satu";
++ break;
++ case AVR32_BUILTIN_SATRNDS:
++ icode = CODE_FOR_satrnds;
++ fname = "satrnds";
++ break;
++ case AVR32_BUILTIN_SATRNDU:
++ icode = CODE_FOR_satrndu;
++ fname = "satrndu";
++ break;
++ }
++
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ arg2 = CALL_EXPR_ARG (exp,2);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ op2 = expand_normal (arg2);
++
++ tmode = insn_data[icode].operand[0].mode;
++
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
++ {
++ op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
++ }
++
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error ("Parameter 2 to __builtin_%s should be a constant number.",
++ fname);
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
++ {
++ error ("Parameter 3 to __builtin_%s should be a constant number.",
++ fname);
++ return NULL_RTX;
++ }
++
++ emit_move_insn (target, op0);
++ pat = GEN_FCN (icode) (target, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++ case AVR32_BUILTIN_MUSTR:
++ icode = CODE_FOR_mustr;
++ tmode = insn_data[icode].operand[0].mode;
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ pat = GEN_FCN (icode) (target);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++
++ case AVR32_BUILTIN_MFSR:
++ icode = CODE_FOR_mfsr;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ op0 = expand_normal (arg0);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mfsr must be a constant number");
++ }
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ case AVR32_BUILTIN_MTSR:
++ icode = CODE_FOR_mtsr;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ mode0 = insn_data[icode].operand[0].mode;
++ mode1 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mtsr must be a constant number");
++ return gen_reg_rtx (mode0);
++ }
++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
++ op1 = copy_to_mode_reg (mode1, op1);
++ pat = GEN_FCN (icode) (op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_MFDR:
++ icode = CODE_FOR_mfdr;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ op0 = expand_normal (arg0);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mfdr must be a constant number");
++ }
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ case AVR32_BUILTIN_MTDR:
++ icode = CODE_FOR_mtdr;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ mode0 = insn_data[icode].operand[0].mode;
++ mode1 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mtdr must be a constant number");
++ return gen_reg_rtx (mode0);
++ }
++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
++ op1 = copy_to_mode_reg (mode1, op1);
++ pat = GEN_FCN (icode) (op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_CACHE:
++ icode = CODE_FOR_cache;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ mode0 = insn_data[icode].operand[0].mode;
++ mode1 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
++ {
++ error ("Parameter 2 to __builtin_cache must be a constant number");
++ return gen_reg_rtx (mode1);
++ }
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
++ op0 = copy_to_mode_reg (mode0, op0);
++
++ pat = GEN_FCN (icode) (op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_SYNC:
++ case AVR32_BUILTIN_MUSFR:
++ case AVR32_BUILTIN_SSRF:
++ case AVR32_BUILTIN_CSRF:
++ {
++ const char *fname;
++ switch (fcode)
++ {
++ default:
++ case AVR32_BUILTIN_SYNC:
++ icode = CODE_FOR_sync;
++ fname = "sync";
++ break;
++ case AVR32_BUILTIN_MUSFR:
++ icode = CODE_FOR_musfr;
++ fname = "musfr";
++ break;
++ case AVR32_BUILTIN_SSRF:
++ icode = CODE_FOR_ssrf;
++ fname = "ssrf";
++ break;
++ case AVR32_BUILTIN_CSRF:
++ icode = CODE_FOR_csrf;
++ fname = "csrf";
++ break;
++ }
++
++ arg0 = CALL_EXPR_ARG (exp,0);
++ op0 = expand_normal (arg0);
++ mode0 = insn_data[icode].operand[0].mode;
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
++ {
++ if (icode == CODE_FOR_musfr)
++ op0 = copy_to_mode_reg (mode0, op0);
++ else
++ {
++ error ("Parameter to __builtin_%s is illegal.", fname);
++ return gen_reg_rtx (mode0);
++ }
++ }
++ pat = GEN_FCN (icode) (op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ }
++ case AVR32_BUILTIN_TLBR:
++ icode = CODE_FOR_tlbr;
++ pat = GEN_FCN (icode) (NULL_RTX);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_TLBS:
++ icode = CODE_FOR_tlbs;
++ pat = GEN_FCN (icode) (NULL_RTX);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_TLBW:
++ icode = CODE_FOR_tlbw;
++ pat = GEN_FCN (icode) (NULL_RTX);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_BREAKPOINT:
++ icode = CODE_FOR_breakpoint;
++ pat = GEN_FCN (icode) (NULL_RTX);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ case AVR32_BUILTIN_XCHG:
++ icode = CODE_FOR_sync_lock_test_and_setsi;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++ mode1 = insn_data[icode].operand[2].mode;
++
++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
++ {
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
++
++ op0 = force_reg (GET_MODE (op0), op0);
++ op0 = gen_rtx_MEM (GET_MODE (op0), op0);
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ error
++ ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
++ }
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ case AVR32_BUILTIN_LDXI:
++ icode = CODE_FOR_ldxi;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ arg2 = CALL_EXPR_ARG (exp,2);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ op2 = expand_normal (arg2);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++ mode1 = insn_data[icode].operand[2].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
++
++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
++ {
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
++
++ if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
++ {
++ error
++ ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
++ return gen_reg_rtx (mode0);
++ }
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ pat = GEN_FCN (icode) (target, op0, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ case AVR32_BUILTIN_BSWAP16:
++ {
++ icode = CODE_FOR_bswap_16;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
++ mode0 = insn_data[icode].operand[1].mode;
++ if (arg0_mode != mode0)
++ arg0 = build1 (NOP_EXPR,
++ (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
++
++ op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
++ tmode = insn_data[icode].operand[0].mode;
++
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ if ( CONST_INT_P (op0) )
++ {
++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
++ ((INTVAL (op0)&0xff00) >> 8) );
++ /* Sign extend 16-bit value to host wide int */
++ val <<= (HOST_BITS_PER_WIDE_INT - 16);
++ val >>= (HOST_BITS_PER_WIDE_INT - 16);
++ op0 = GEN_INT(val);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ emit_move_insn(target, op0);
++ return target;
++ }
++ else
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ {
++ target = gen_reg_rtx (tmode);
++ }
++
++
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++ case AVR32_BUILTIN_BSWAP32:
++ {
++ icode = CODE_FOR_bswap_32;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ op0 = expand_normal (arg0);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ if ( CONST_INT_P (op0) )
++ {
++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
++ ((INTVAL (op0)&0x0000ff00) << 8) |
++ ((INTVAL (op0)&0x00ff0000) >> 8) |
++ ((INTVAL (op0)&0xff000000) >> 24) );
++ /* Sign extend 32-bit value to host wide int */
++ val <<= (HOST_BITS_PER_WIDE_INT - 32);
++ val >>= (HOST_BITS_PER_WIDE_INT - 32);
++ op0 = GEN_INT(val);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ emit_move_insn(target, op0);
++ return target;
++ }
++ else
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++ case AVR32_BUILTIN_MVCR_W:
++ case AVR32_BUILTIN_MVCR_D:
++ {
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++
++ if (fcode == AVR32_BUILTIN_MVCR_W)
++ icode = CODE_FOR_mvcrsi;
++ else
++ icode = CODE_FOR_mvcrdi;
++
++ tmode = insn_data[icode].operand[0].mode;
++
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
++ {
++ error
++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
++ {
++ error
++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
++
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++ case AVR32_BUILTIN_MACSATHH_W:
++ case AVR32_BUILTIN_MACWH_D:
++ case AVR32_BUILTIN_MACHH_D:
++ {
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ arg2 = CALL_EXPR_ARG (exp,2);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ op2 = expand_normal (arg2);
++
++ icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
++ (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
++ CODE_FOR_machh_d);
++
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++ mode1 = insn_data[icode].operand[2].mode;
++
++
++ if (!target
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
++ {
++ /* If op0 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op0))
++ op0 = convert_to_mode (tmode, op0, 1);
++ else
++ op0 = copy_to_mode_reg (tmode, op0);
++ }
++
++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op1))
++ op1 = convert_to_mode (mode0, op1, 1);
++ else
++ op1 = copy_to_mode_reg (mode0, op1);
++ }
++
++ if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op2))
++ op2 = convert_to_mode (mode1, op2, 1);
++ else
++ op2 = copy_to_mode_reg (mode1, op2);
++ }
++
++ emit_move_insn (target, op0);
++
++ pat = GEN_FCN (icode) (target, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ }
++ case AVR32_BUILTIN_MVRC_W:
++ case AVR32_BUILTIN_MVRC_D:
++ {
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ arg2 = CALL_EXPR_ARG (exp,2);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ op2 = expand_normal (arg2);
++
++ if (fcode == AVR32_BUILTIN_MVRC_W)
++ icode = CODE_FOR_mvrcsi;
++ else
++ icode = CODE_FOR_mvrcdi;
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
++ {
++ error ("Parameter 1 is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error ("Parameter 2 is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
++
++ if (GET_CODE (op2) == CONST_INT
++ || GET_CODE (op2) == CONST
++ || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
++ {
++ op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
++ }
++
++ if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
++ op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
++
++
++ pat = GEN_FCN (icode) (op0, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return NULL_RTX;
++ }
++ case AVR32_BUILTIN_COP:
++ {
++ rtx op3, op4;
++ tree arg3, arg4;
++ icode = CODE_FOR_cop;
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ arg2 = CALL_EXPR_ARG (exp,2);
++ arg3 = CALL_EXPR_ARG (exp,3);
++ arg4 = CALL_EXPR_ARG (exp,4);
++ op0 = expand_normal (arg0);
++ op1 = expand_normal (arg1);
++ op2 = expand_normal (arg2);
++ op3 = expand_normal (arg3);
++ op4 = expand_normal (arg4);
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
++ {
++ error
++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error
++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
++ {
++ error
++ ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
++ {
++ error
++ ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
++
++ if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
++ {
++ error
++ ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
++ error ("Number should be between 0 and 127.");
++ return NULL_RTX;
++ }
++
++ pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++
++ case AVR32_BUILTIN_MEMS:
++ case AVR32_BUILTIN_MEMC:
++ case AVR32_BUILTIN_MEMT:
++ {
++ if (!TARGET_RMW)
++ error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
++
++ switch (fcode) {
++ case AVR32_BUILTIN_MEMS:
++ icode = CODE_FOR_iorsi3;
++ break;
++ case AVR32_BUILTIN_MEMC:
++ icode = CODE_FOR_andsi3;
++ break;
++ case AVR32_BUILTIN_MEMT:
++ icode = CODE_FOR_xorsi3;
++ break;
++ }
++ arg0 = CALL_EXPR_ARG (exp,0);
++ arg1 = CALL_EXPR_ARG (exp,1);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ if ( GET_CODE (op0) == SYMBOL_REF )
++ // This symbol must be RMW addressable
++ SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
++ op0 = gen_rtx_MEM(SImode, op0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ mode0 = insn_data[icode].operand[1].mode;
++
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
++ }
++
++ if ( !CONST_INT_P (op1)
++ || INTVAL (op1) > 31
++ || INTVAL (op1) < 0 )
++ error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
++
++ if ( fcode == AVR32_BUILTIN_MEMC )
++ op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
++ else
++ op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
++ pat = GEN_FCN (icode) (op0, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return op0;
++ }
++
++ case AVR32_BUILTIN_SLEEP:
++ {
++ arg0 = CALL_EXPR_ARG (exp, 0);
++ op0 = expand_normal (arg0);
++ int intval = INTVAL(op0);
++
++ /* Check if the argument if integer and if the value of integer
++ is greater than 0. */
++
++ if (!CONSTANT_P (op0))
++ error ("Parameter 1 to __builtin_sleep() is not a valid integer.");
++ if (intval < 0 )
++ error ("Parameter 1 to __builtin_sleep() should be an integer greater than 0.");
++
++ int strncmpval = strncmp (avr32_part_name,"uc3l", 4);
++
++ /* Check if op0 is less than 7 for uc3l* and less than 6 for other
++ devices. By this check we are avoiding if operand is less than
++ 256. For more devices, add more such checks. */
++
++ if ( strncmpval == 0 && intval >= 7)
++ error ("Parameter 1 to __builtin_sleep() should be less than or equal to 7.");
++ else if ( strncmp != 0 && intval >= 6)
++ error ("Parameter 1 to __builtin_sleep() should be less than or equal to 6.");
++
++ emit_insn (gen_sleep(op0));
++ return target;
++
++ }
++ case AVR32_BUILTIN_DELAY_CYCLES:
++ {
++ arg0 = CALL_EXPR_ARG (exp, 0);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++
++ if (TARGET_ARCH_AP)
++ error (" __builtin_avr32_delay_cycles() not supported for \'%s\' architecture.", avr32_arch_name);
++ if (!CONSTANT_P (op0))
++ error ("Parameter 1 to __builtin_avr32_delay_cycles() should be an integer.");
++ emit_insn (gen_delay_cycles (op0));
++ return 0;
++
++ }
++
++ }
++
++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ if (d->code == fcode)
++ return avr32_expand_binop_builtin (d->icode, exp, target);
++
++
++ /* @@@ Should really do something sensible here. */
++ return NULL_RTX;
++}
++
++
++/* Handle an "interrupt" or "isr" attribute;
++ arguments as in struct attribute_spec.handler. */
++static tree
++avr32_handle_isr_attribute (tree * node, tree name, tree args,
++ int flags, bool * no_add_attrs)
++{
++ if (DECL_P (*node))
++ {
++ if (TREE_CODE (*node) != FUNCTION_DECL)
++ {
++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++ /* FIXME: the argument if any is checked for type attributes; should it
++ be checked for decl ones? */
++ }
++ else
++ {
++ if (TREE_CODE (*node) == FUNCTION_TYPE
++ || TREE_CODE (*node) == METHOD_TYPE)
++ {
++ if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
++ {
++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++ }
++ else if (TREE_CODE (*node) == POINTER_TYPE
++ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
++ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
++ && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
++ {
++ *node = build_variant_type_copy (*node);
++ TREE_TYPE (*node) = build_type_attribute_variant
++ (TREE_TYPE (*node),
++ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
++ *no_add_attrs = true;
++ }
++ else
++ {
++ /* Possibly pass this attribute on from the type to a decl. */
++ if (flags & ((int) ATTR_FLAG_DECL_NEXT
++ | (int) ATTR_FLAG_FUNCTION_NEXT
++ | (int) ATTR_FLAG_ARRAY_NEXT))
++ {
++ *no_add_attrs = true;
++ return tree_cons (name, args, NULL_TREE);
++ }
++ else
++ {
++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ }
++ }
++ }
++
++ return NULL_TREE;
++}
++
++
++/* Handle an attribute requiring a FUNCTION_DECL;
++ arguments as in struct attribute_spec.handler. */
++static tree
++avr32_handle_fndecl_attribute (tree * node, tree name,
++ tree args,
++ int flags ATTRIBUTE_UNUSED,
++ bool * no_add_attrs)
++{
++ if (TREE_CODE (*node) != FUNCTION_DECL)
++ {
++ warning (OPT_Wattributes,"%qs attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++ }
++
++ fndecl_attribute_args = args;
++ if (args == NULL_TREE)
++ return NULL_TREE;
++
++ tree value = TREE_VALUE (args);
++ if (TREE_CODE (value) != INTEGER_CST)
++ {
++ warning (OPT_Wattributes,
++ "argument of %qs attribute is not an integer constant",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++
++ return NULL_TREE;
++}
++
++
++/* Handle an acall attribute;
++ arguments as in struct attribute_spec.handler. */
++
++static tree
++avr32_handle_acall_attribute (tree * node, tree name,
++ tree args ATTRIBUTE_UNUSED,
++ int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
++{
++ if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
++ {
++ warning (OPT_Wattributes,"`%s' attribute not yet supported...",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++ }
++
++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++}
++
++
++bool
++avr32_flashvault_call(tree decl)
++{
++ tree attributes;
++ tree fv_attribute;
++ tree vector_tree;
++ unsigned int vector;
++
++ if (decl && TREE_CODE (decl) == FUNCTION_DECL)
++ {
++ attributes = DECL_ATTRIBUTES(decl);
++ fv_attribute = lookup_attribute ("flashvault", attributes);
++ if (fv_attribute != NULL_TREE)
++ {
++ /* Get attribute parameter, for the function vector number. */
++ /*
++ There is probably an easier, standard way to retrieve the
++ attribute parameter which needs to be done here.
++ */
++ vector_tree = TREE_VALUE(fv_attribute);
++ if (vector_tree != NULL_TREE)
++ {
++ vector = (unsigned int)TREE_INT_CST_LOW(TREE_VALUE(vector_tree));
++ fprintf (asm_out_file,
++ "\tmov\tr8, lo(%i)\t# Load vector number for sscall.\n",
++ vector);
++ }
++
++ fprintf (asm_out_file,
++ "\tsscall\t# Secure system call.\n");
++
++ return true;
++ }
++ }
++
++ return false;
++}
++
++
++static bool has_attribute_p (tree decl, const char *name)
++{
++ if (decl && TREE_CODE (decl) == FUNCTION_DECL)
++ {
++ return (lookup_attribute (name, DECL_ATTRIBUTES(decl)) != NULL_TREE);
++ }
++ return NULL_TREE;
++}
++
++
++/* Return 0 if the attributes for two types are incompatible, 1 if they
++ are compatible, and 2 if they are nearly compatible (which causes a
++ warning to be generated). */
++static int
++avr32_comp_type_attributes (tree type1, tree type2)
++{
++ bool acall1, acall2, isr1, isr2, naked1, naked2, fv1, fv2, fvimpl1, fvimpl2;
++
++ /* Check for mismatch of non-default calling convention. */
++ if (TREE_CODE (type1) != FUNCTION_TYPE)
++ return 1;
++
++ /* Check for mismatched call attributes. */
++ acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
++ acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
++ naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
++ naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
++ fv1 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type1)) != NULL;
++ fv2 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type2)) != NULL;
++ fvimpl1 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type1)) != NULL;
++ fvimpl2 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type2)) != NULL;
++ isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
++ if (!isr1)
++ isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
++
++ isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
++ if (!isr2)
++ isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
++
++ if ((acall1 && isr2)
++ || (acall2 && isr1)
++ || (naked1 && isr2)
++ || (naked2 && isr1)
++ || (fv1 && isr2)
++ || (fv2 && isr1)
++ || (fvimpl1 && isr2)
++ || (fvimpl2 && isr1)
++ || (fv1 && fvimpl2)
++ || (fv2 && fvimpl1)
++ )
++ return 0;
++
++ return 1;
++}
++
++
++/* Computes the type of the current function. */
++static unsigned long
++avr32_compute_func_type (void)
++{
++ unsigned long type = AVR32_FT_UNKNOWN;
++ tree a;
++ tree attr;
++
++ if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
++ abort ();
++
++ /* Decide if the current function is volatile. Such functions never
++ return, and many memory cycles can be saved by not storing register
++ values that will never be needed again. This optimization was added to
++ speed up context switching in a kernel application. */
++ if (optimize > 0
++ && TREE_NOTHROW (current_function_decl)
++ && TREE_THIS_VOLATILE (current_function_decl))
++ type |= AVR32_FT_VOLATILE;
++
++ if (cfun->static_chain_decl != NULL)
++ type |= AVR32_FT_NESTED;
++
++ attr = DECL_ATTRIBUTES (current_function_decl);
++
++ a = lookup_attribute ("isr", attr);
++ if (a == NULL_TREE)
++ a = lookup_attribute ("interrupt", attr);
++
++ if (a == NULL_TREE)
++ type |= AVR32_FT_NORMAL;
++ else
++ type |= avr32_isr_value (TREE_VALUE (a));
++
++
++ a = lookup_attribute ("acall", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_ACALL;
++
++ a = lookup_attribute ("naked", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_NAKED;
++
++ a = lookup_attribute ("flashvault", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_FLASHVAULT;
++
++ a = lookup_attribute ("flashvault_impl", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_FLASHVAULT_IMPL;
++
++ return type;
++}
++
++
++/* Returns the type of the current function. */
++static unsigned long
++avr32_current_func_type (void)
++{
++ if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
++ cfun->machine->func_type = avr32_compute_func_type ();
++
++ return cfun->machine->func_type;
++}
++
++
++/*
++This target hook should return true if we should not pass type solely
++in registers. The file expr.h defines a definition that is usually appropriate,
++refer to expr.h for additional documentation.
++*/
++bool
++avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
++{
++ if (type && AGGREGATE_TYPE_P (type)
++ /* If the alignment is less than the size then pass in the struct on
++ the stack. */
++ && ((unsigned int) TYPE_ALIGN_UNIT (type) <
++ (unsigned int) int_size_in_bytes (type))
++ /* If we support unaligned word accesses then structs of size 4 and 8
++ can have any alignment and still be passed in registers. */
++ && !(TARGET_UNALIGNED_WORD
++ && (int_size_in_bytes (type) == 4
++ || int_size_in_bytes (type) == 8))
++ /* Double word structs need only a word alignment. */
++ && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
++ return true;
++
++ if (type && AGGREGATE_TYPE_P (type)
++ /* Structs of size 3,5,6,7 are always passed in registers. */
++ && (int_size_in_bytes (type) == 3
++ || int_size_in_bytes (type) == 5
++ || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
++ return true;
++
++
++ return (type && TREE_ADDRESSABLE (type));
++}
++
++
++bool
++avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
++{
++ return true;
++}
++
++
++/*
++ This target hook should return true if an argument at the position indicated
++ by cum should be passed by reference. This predicate is queried after target
++ independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
++
++ If the hook returns true, a copy of that argument is made in memory and a
++ pointer to the argument is passed instead of the argument itself. The pointer
++ is passed in whatever way is appropriate for passing a pointer to that type.
++*/
++bool
++avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type, bool named ATTRIBUTE_UNUSED)
++{
++ return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
++}
++
++
++static int
++avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type ATTRIBUTE_UNUSED,
++ bool named ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
++
++
++struct gcc_target targetm = TARGET_INITIALIZER;
++
++/*
++ Table used to convert from register number in the assembler instructions and
++ the register numbers used in gcc.
++*/
++const int avr32_function_arg_reglist[] = {
++ INTERNAL_REGNUM (12),
++ INTERNAL_REGNUM (11),
++ INTERNAL_REGNUM (10),
++ INTERNAL_REGNUM (9),
++ INTERNAL_REGNUM (8)
++};
++
++
++rtx avr32_compare_op0 = NULL_RTX;
++rtx avr32_compare_op1 = NULL_RTX;
++rtx avr32_compare_operator = NULL_RTX;
++rtx avr32_acc_cache = NULL_RTX;
++/* type of branch to use */
++enum avr32_cmp_type avr32_branch_type;
++
++
++/*
++ Returns nonzero if it is allowed to store a value of mode mode in hard
++ register number regno.
++*/
++int
++avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
++{
++ switch (mode)
++ {
++ case DImode: /* long long */
++ case DFmode: /* double */
++ case SCmode: /* __complex__ float */
++ case CSImode: /* __complex__ int */
++ if (regnr < 4)
++ { /* long long int not supported in r12, sp, lr or pc. */
++ return 0;
++ }
++ else
++ {
++ /* long long int has to be referred in even registers. */
++ if (regnr % 2)
++ return 0;
++ else
++ return 1;
++ }
++ case CDImode: /* __complex__ long long */
++ case DCmode: /* __complex__ double */
++ case TImode: /* 16 bytes */
++ if (regnr < 7)
++ return 0;
++ else if (regnr % 2)
++ return 0;
++ else
++ return 1;
++ default:
++ return 1;
++ }
++}
++
++
++int
++avr32_rnd_operands (rtx add, rtx shift)
++{
++ if (GET_CODE (shift) == CONST_INT &&
++ GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
++ {
++ if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++
++int
++avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
++{
++ switch (c)
++ {
++ case 'K':
++ case 'I':
++ {
++ HOST_WIDE_INT min_value = 0, max_value = 0;
++ char size_str[3];
++ int const_size;
++
++ size_str[0] = str[2];
++ size_str[1] = str[3];
++ size_str[2] = '\0';
++ const_size = atoi (size_str);
++
++ if (TOUPPER (str[1]) == 'U')
++ {
++ min_value = 0;
++ max_value = (1 << const_size) - 1;
++ }
++ else if (TOUPPER (str[1]) == 'S')
++ {
++ min_value = -(1 << (const_size - 1));
++ max_value = (1 << (const_size - 1)) - 1;
++ }
++
++ if (c == 'I')
++ {
++ value = -value;
++ }
++
++ if (value >= min_value && value <= max_value)
++ {
++ return 1;
++ }
++ break;
++ }
++ case 'M':
++ return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
++ case 'J':
++ return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
++ case 'O':
++ return one_bit_set_operand (GEN_INT (value), VOIDmode);
++ case 'N':
++ return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
++ case 'L':
++ /* The lower 16-bits are set. */
++ return ((value & 0xffff) == 0xffff) ;
++ }
++
++ return 0;
++}
++
++
++/* Compute mask of registers which needs saving upon function entry. */
++static unsigned long
++avr32_compute_save_reg_mask (int push)
++{
++ unsigned long func_type;
++ unsigned int save_reg_mask = 0;
++ unsigned int reg;
++
++ func_type = avr32_current_func_type ();
++
++ if (IS_INTERRUPT (func_type))
++ {
++ unsigned int max_reg = 12;
++
++ /* Get the banking scheme for the interrupt */
++ switch (func_type)
++ {
++ case AVR32_FT_ISR_FULL:
++ max_reg = 0;
++ break;
++ case AVR32_FT_ISR_HALF:
++ max_reg = 7;
++ break;
++ case AVR32_FT_ISR_NONE:
++ max_reg = 12;
++ break;
++ }
++
++ /* Interrupt functions must not corrupt any registers, even call
++ clobbered ones. If this is a leaf function we can just examine the
++ registers used by the RTL, but otherwise we have to assume that
++ whatever function is called might clobber anything, and so we have
++ to save all the call-clobbered registers as well. */
++
++ /* Need not push the registers r8-r12 for AVR32A architectures, as this
++ is automatially done in hardware. We also do not have any shadow
++ registers. */
++ if (TARGET_UARCH_AVR32A)
++ {
++ max_reg = 7;
++ func_type = AVR32_FT_ISR_NONE;
++ }
++
++ /* All registers which are used and are not shadowed must be saved. */
++ for (reg = 0; reg <= max_reg; reg++)
++ if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
++ || (!current_function_is_leaf
++ && call_used_regs[INTERNAL_REGNUM (reg)]))
++ save_reg_mask |= (1 << reg);
++
++ /* Check LR */
++ if ((df_regs_ever_live_p (LR_REGNUM)
++ || !current_function_is_leaf || frame_pointer_needed)
++ /* Only non-shadowed register models */
++ && (func_type == AVR32_FT_ISR_NONE))
++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
++
++ /* Make sure that the GOT register is pushed. */
++ if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
++ && crtl->uses_pic_offset_table)
++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
++
++ }
++ else
++ {
++ int use_pushm = optimize_size;
++
++ /* In the normal case we only need to save those registers which are
++ call saved and which are used by this function. */
++ for (reg = 0; reg <= 7; reg++)
++ if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
++ && !call_used_regs[INTERNAL_REGNUM (reg)])
++ save_reg_mask |= (1 << reg);
++
++ /* Make sure that the GOT register is pushed. */
++ if (crtl->uses_pic_offset_table)
++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
++
++
++ /* If we optimize for size and do not have anonymous arguments: use
++ pushm/popm always. */
++ if (use_pushm)
++ {
++ if ((save_reg_mask & (1 << 0))
++ || (save_reg_mask & (1 << 1))
++ || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
++ save_reg_mask |= 0xf;
++
++ if ((save_reg_mask & (1 << 4))
++ || (save_reg_mask & (1 << 5))
++ || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
++ save_reg_mask |= 0xf0;
++
++ if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
++ save_reg_mask |= 0x300;
++ }
++
++
++ /* Check LR */
++ if ((df_regs_ever_live_p (LR_REGNUM)
++ || !current_function_is_leaf
++ || (optimize_size
++ && save_reg_mask
++ && !crtl->calls_eh_return)
++ || frame_pointer_needed)
++ && !IS_FLASHVAULT (func_type))
++ {
++ if (push
++ /* Never pop LR into PC for functions which
++ calls __builtin_eh_return, since we need to
++ fix the SP after the restoring of the registers
++ and before returning. */
++ || crtl->calls_eh_return)
++ {
++ /* Push/Pop LR */
++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
++ }
++ else
++ {
++ /* Pop PC */
++ save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
++ }
++ }
++ }
++
++
++ /* Save registers so the exception handler can modify them. */
++ if (crtl->calls_eh_return)
++ {
++ unsigned int i;
++
++ for (i = 0;; i++)
++ {
++ reg = EH_RETURN_DATA_REGNO (i);
++ if (reg == INVALID_REGNUM)
++ break;
++ save_reg_mask |= 1 << ASM_REGNUM (reg);
++ }
++ }
++
++ return save_reg_mask;
++}
++
++
++/* Compute total size in bytes of all saved registers. */
++static int
++avr32_get_reg_mask_size (int reg_mask)
++{
++ int reg, size;
++ size = 0;
++
++ for (reg = 0; reg <= 15; reg++)
++ if (reg_mask & (1 << reg))
++ size += 4;
++
++ return size;
++}
++
++
++/* Get a register from one of the registers which are saved onto the stack
++ upon function entry. */
++static int
++avr32_get_saved_reg (int save_reg_mask)
++{
++ unsigned int reg;
++
++ /* Find the first register which is saved in the saved_reg_mask */
++ for (reg = 0; reg <= 15; reg++)
++ if (save_reg_mask & (1 << reg))
++ return reg;
++
++ return -1;
++}
++
++
++/* Return 1 if it is possible to return using a single instruction. */
++int
++avr32_use_return_insn (int iscond)
++{
++ unsigned int func_type = avr32_current_func_type ();
++ unsigned long saved_int_regs;
++
++ /* Never use a return instruction before reload has run. */
++ if (!reload_completed)
++ return 0;
++
++ /* Must adjust the stack for vararg functions. */
++ if (crtl->args.info.uses_anonymous_args)
++ return 0;
++
++ /* If there a stack adjstment. */
++ if (get_frame_size ())
++ return 0;
++
++ saved_int_regs = avr32_compute_save_reg_mask (TRUE);
++
++ /* Conditional returns can not be performed in one instruction if we need
++ to restore registers from the stack */
++ if (iscond && saved_int_regs)
++ return 0;
++
++ /* Conditional return can not be used for interrupt handlers. */
++ if (iscond && IS_INTERRUPT (func_type))
++ return 0;
++
++ /* For interrupt handlers which needs to pop registers */
++ if (saved_int_regs && IS_INTERRUPT (func_type))
++ return 0;
++
++
++ /* If there are saved registers but the LR isn't saved, then we need two
++ instructions for the return. */
++ if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
++ return 0;
++
++
++ return 1;
++}
++
++
++/* Generate some function prologue info in the assembly file. */
++void
++avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
++{
++ unsigned long func_type = avr32_current_func_type ();
++
++ if (IS_NAKED (func_type))
++ fprintf (f,
++ "\t# Function is naked: Prologue and epilogue provided by programmer\n");
++
++ if (IS_FLASHVAULT (func_type))
++ {
++ fprintf(f,
++ "\t.ident \"flashvault\"\n\t# Function is defined with flashvault attribute.\n");
++ }
++
++ if (IS_FLASHVAULT_IMPL (func_type))
++ {
++ fprintf(f,
++ "\t.ident \"flashvault\"\n\t# Function is defined with flashvault_impl attribute.\n");
++
++ /* Save information on flashvault function declaration. */
++ tree fv_attribute = lookup_attribute ("flashvault_impl", DECL_ATTRIBUTES(current_function_decl));
++ if (fv_attribute != NULL_TREE)
++ {
++ tree vector_tree = TREE_VALUE(fv_attribute);
++ if (vector_tree != NULL_TREE)
++ {
++ unsigned int vector_num;
++ const char * name;
++
++ vector_num = (unsigned int) TREE_INT_CST_LOW (TREE_VALUE (vector_tree));
++
++ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
++
++ flashvault_decl_list_add (vector_num, name);
++ }
++ }
++ }
++
++ if (IS_INTERRUPT (func_type))
++ {
++ switch (func_type)
++ {
++ case AVR32_FT_ISR_FULL:
++ fprintf (f,
++ "\t# Interrupt Function: Fully shadowed register file\n");
++ break;
++ case AVR32_FT_ISR_HALF:
++ fprintf (f,
++ "\t# Interrupt Function: Half shadowed register file\n");
++ break;
++ default:
++ case AVR32_FT_ISR_NONE:
++ fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
++ break;
++ }
++ }
++
++
++ fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
++ crtl->args.size, frame_size,
++ crtl->args.pretend_args_size);
++
++ fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
++ frame_pointer_needed, current_function_is_leaf);
++
++ fprintf (f, "\t# uses_anonymous_args = %i\n",
++ crtl->args.info.uses_anonymous_args);
++
++ if (crtl->calls_eh_return)
++ fprintf (f, "\t# Calls __builtin_eh_return.\n");
++
++}
++
++
++/* Generate and emit an insn that we will recognize as a pushm or stm.
++ Unfortunately, since this insn does not reflect very well the actual
++ semantics of the operation, we need to annotate the insn for the benefit
++ of DWARF2 frame unwind information. */
++
++int avr32_convert_to_reglist16 (int reglist8_vect);
++
++static rtx
++emit_multi_reg_push (int reglist, int usePUSHM)
++{
++ rtx insn;
++ rtx dwarf;
++ rtx tmp;
++ rtx reg;
++ int i;
++ int nr_regs;
++ int index = 0;
++
++ if (usePUSHM)
++ {
++ insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
++ reglist = avr32_convert_to_reglist16 (reglist);
++ }
++ else
++ {
++ insn = emit_insn (gen_stm (stack_pointer_rtx,
++ gen_rtx_CONST_INT (SImode, reglist),
++ gen_rtx_CONST_INT (SImode, 1)));
++ }
++
++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
++
++ for (i = 15; i >= 0; i--)
++ {
++ if (reglist & (1 << i))
++ {
++ reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
++ tmp = gen_rtx_SET (VOIDmode,
++ gen_rtx_MEM (SImode,
++ plus_constant (stack_pointer_rtx,
++ 4 * index)), reg);
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
++ }
++ }
++
++ tmp = gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ GEN_INT (-4 * nr_regs)));
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 0) = tmp;
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
++ REG_NOTES (insn));
++ return insn;
++}
++
++rtx
++avr32_gen_load_multiple (rtx * regs, int count, rtx from,
++ int write_back, int in_struct_p, int scalar_p)
++{
++
++ rtx result;
++ int i = 0, j;
++
++ result =
++ gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
++
++ if (write_back)
++ {
++ XVECEXP (result, 0, 0)
++ = gen_rtx_SET (GET_MODE (from), from,
++ plus_constant (from, count * 4));
++ i = 1;
++ count++;
++ }
++
++
++ for (j = 0; i < count; i++, j++)
++ {
++ rtx unspec;
++ rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
++ MEM_IN_STRUCT_P (mem) = in_struct_p;
++ MEM_SCALAR_P (mem) = scalar_p;
++ unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
++ XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
++ }
++
++ return result;
++}
++
++
++rtx
++avr32_gen_store_multiple (rtx * regs, int count, rtx to,
++ int in_struct_p, int scalar_p)
++{
++ rtx result;
++ int i = 0, j;
++
++ result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
++
++ for (j = 0; i < count; i++, j++)
++ {
++ rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
++ MEM_IN_STRUCT_P (mem) = in_struct_p;
++ MEM_SCALAR_P (mem) = scalar_p;
++ XVECEXP (result, 0, i)
++ = gen_rtx_SET (VOIDmode, mem,
++ gen_rtx_UNSPEC (VOIDmode,
++ gen_rtvec (1, regs[j]),
++ UNSPEC_STORE_MULTIPLE));
++ }
++
++ return result;
++}
++
++
++/* Move a block of memory if it is word aligned or we support unaligned
++ word memory accesses. The size must be maximum 64 bytes. */
++int
++avr32_gen_movmemsi (rtx * operands)
++{
++ HOST_WIDE_INT bytes_to_go;
++ rtx src, dst;
++ rtx st_src, st_dst;
++ int src_offset = 0, dst_offset = 0;
++ int block_size;
++ int dst_in_struct_p, src_in_struct_p;
++ int dst_scalar_p, src_scalar_p;
++ int unaligned;
++
++ if (GET_CODE (operands[2]) != CONST_INT
++ || GET_CODE (operands[3]) != CONST_INT
++ || INTVAL (operands[2]) > 64
++ || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
++ return 0;
++
++ unaligned = (INTVAL (operands[3]) & 3) != 0;
++
++ block_size = 4;
++
++ st_dst = XEXP (operands[0], 0);
++ st_src = XEXP (operands[1], 0);
++
++ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
++ dst_scalar_p = MEM_SCALAR_P (operands[0]);
++ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
++ src_scalar_p = MEM_SCALAR_P (operands[1]);
++
++ dst = copy_to_mode_reg (SImode, st_dst);
++ src = copy_to_mode_reg (SImode, st_src);
++
++ bytes_to_go = INTVAL (operands[2]);
++
++ while (bytes_to_go)
++ {
++ enum machine_mode move_mode;
++ /* (Seems to be a problem with reloads for the movti pattern so this is
++ disabled until that problem is resolved)
++ UPDATE: Problem seems to be solved now.... */
++ if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
++ /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
++ && !TARGET_ARCH_UC)
++ move_mode = TImode;
++ else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
++ move_mode = DImode;
++ else if (bytes_to_go >= GET_MODE_SIZE (SImode))
++ move_mode = SImode;
++ else
++ move_mode = QImode;
++
++ {
++ rtx src_mem;
++ rtx dst_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_PLUS (SImode, dst,
++ GEN_INT (dst_offset)));
++ dst_offset += GET_MODE_SIZE (move_mode);
++ if ( 0 /* This causes an error in GCC. Think there is
++ something wrong in the gcse pass which causes REQ_EQUIV notes
++ to be wrong so disabling it for now. */
++ && move_mode == TImode
++ && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
++ {
++ src_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_POST_INC (SImode, src));
++ }
++ else
++ {
++ src_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_PLUS (SImode, src,
++ GEN_INT (src_offset)));
++ src_offset += GET_MODE_SIZE (move_mode);
++ }
++
++ bytes_to_go -= GET_MODE_SIZE (move_mode);
++
++ MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
++ MEM_SCALAR_P (dst_mem) = dst_scalar_p;
++
++ MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
++ MEM_SCALAR_P (src_mem) = src_scalar_p;
++ emit_move_insn (dst_mem, src_mem);
++
++ }
++ }
++
++ return 1;
++}
++
++
++/* Expand the prologue instruction. */
++void
++avr32_expand_prologue (void)
++{
++ rtx insn, dwarf;
++ unsigned long saved_reg_mask;
++ int reglist8 = 0;
++
++ /* Naked functions do not have a prologue. */
++ if (IS_NAKED (avr32_current_func_type ()))
++ return;
++
++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
++
++ if (saved_reg_mask)
++ {
++ /* Must push used registers. */
++
++ /* Should we use POPM or LDM? */
++ int usePUSHM = TRUE;
++ reglist8 = 0;
++ if (((saved_reg_mask & (1 << 0)) ||
++ (saved_reg_mask & (1 << 1)) ||
++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
++ {
++ /* One of R0-R3 should at least be pushed. */
++ if (((saved_reg_mask & (1 << 0)) &&
++ (saved_reg_mask & (1 << 1)) &&
++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
++ {
++ /* All should be pushed. */
++ reglist8 |= 0x01;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
++
++ if (((saved_reg_mask & (1 << 4)) ||
++ (saved_reg_mask & (1 << 5)) ||
++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
++ {
++ /* One of R4-R7 should at least be pushed */
++ if (((saved_reg_mask & (1 << 4)) &&
++ (saved_reg_mask & (1 << 5)) &&
++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
++ {
++ if (usePUSHM)
++ /* All should be pushed */
++ reglist8 |= 0x02;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
++
++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
++ {
++ /* One of R8-R9 should at least be pushed. */
++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
++ {
++ if (usePUSHM)
++ /* All should be pushed. */
++ reglist8 |= 0x04;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
++
++ if (saved_reg_mask & (1 << 10))
++ reglist8 |= 0x08;
++
++ if (saved_reg_mask & (1 << 11))
++ reglist8 |= 0x10;
++
++ if (saved_reg_mask & (1 << 12))
++ reglist8 |= 0x20;
++
++ if ((saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
++ && !IS_FLASHVAULT (avr32_current_func_type ()))
++ {
++ /* Push LR */
++ reglist8 |= 0x40;
++ }
++
++ if (usePUSHM)
++ {
++ insn = emit_multi_reg_push (reglist8, TRUE);
++ }
++ else
++ {
++ insn = emit_multi_reg_push (saved_reg_mask, FALSE);
++ }
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ /* Prevent this instruction from being scheduled after any other
++ instructions. */
++ emit_insn (gen_blockage ());
++ }
++
++ /* Set frame pointer */
++ if (frame_pointer_needed)
++ {
++ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++
++ if (get_frame_size () > 0)
++ {
++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
++ {
++ insn = emit_insn (gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ gen_rtx_CONST_INT
++ (SImode,
++ -get_frame_size
++ ()))));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++ else
++ {
++ /* Immediate is larger than k21 We must either check if we can use
++ one of the pushed reegisters as temporary storage or we must
++ make us a temp register by pushing a register to the stack. */
++ rtx temp_reg, const_pool_entry, insn;
++ if (saved_reg_mask)
++ {
++ temp_reg =
++ gen_rtx_REG (SImode,
++ INTERNAL_REGNUM (avr32_get_saved_reg
++ (saved_reg_mask)));
++ }
++ else
++ {
++ temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
++ emit_move_insn (gen_rtx_MEM
++ (SImode,
++ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
++ temp_reg);
++ }
++
++ const_pool_entry =
++ force_const_mem (SImode,
++ gen_rtx_CONST_INT (SImode, get_frame_size ()));
++ emit_move_insn (temp_reg, const_pool_entry);
++
++ insn = emit_insn (gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_MINUS (SImode,
++ stack_pointer_rtx,
++ temp_reg)));
++
++ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
++ gen_rtx_PLUS (SImode, stack_pointer_rtx,
++ GEN_INT (-get_frame_size ())));
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ dwarf, REG_NOTES (insn));
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ if (!saved_reg_mask)
++ {
++ insn =
++ emit_move_insn (temp_reg,
++ gen_rtx_MEM (SImode,
++ gen_rtx_POST_INC (SImode,
++ gen_rtx_REG
++ (SImode,
++ 13))));
++ }
++
++ /* Mark the temp register as dead */
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
++ REG_NOTES (insn));
++
++
++ }
++
++ /* Prevent the the stack adjustment to be scheduled after any
++ instructions using the frame pointer. */
++ emit_insn (gen_blockage ());
++ }
++
++ /* Load GOT */
++ if (flag_pic)
++ {
++ avr32_load_pic_register ();
++
++ /* gcc does not know that load or call instructions might use the pic
++ register so it might schedule these instructions before the loading
++ of the pic register. To avoid this emit a barrier for now. TODO!
++ Find out a better way to let gcc know which instructions might use
++ the pic register. */
++ emit_insn (gen_blockage ());
++ }
++ return;
++}
++
++
++void
++avr32_set_return_address (rtx source, rtx scratch)
++{
++ rtx addr;
++ unsigned long saved_regs;
++
++ saved_regs = avr32_compute_save_reg_mask (TRUE);
++
++ if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
++ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
++ else
++ {
++ if (frame_pointer_needed)
++ addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
++ else
++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
++ {
++ addr = plus_constant (stack_pointer_rtx, get_frame_size ());
++ }
++ else
++ {
++ emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
++ addr = scratch;
++ }
++ emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
++ }
++}
++
++
++/* Return the length of INSN. LENGTH is the initial length computed by
++ attributes in the machine-description file. */
++int
++avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
++ int length ATTRIBUTE_UNUSED)
++{
++ return length;
++}
++
++
++void
++avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
++ int iscond ATTRIBUTE_UNUSED,
++ rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
++{
++
++ unsigned long saved_reg_mask;
++ int insert_ret = TRUE;
++ int reglist8 = 0;
++ int stack_adjustment = get_frame_size ();
++ unsigned int func_type = avr32_current_func_type ();
++ FILE *f = asm_out_file;
++
++ /* Naked functions does not have an epilogue */
++ if (IS_NAKED (func_type))
++ return;
++
++ saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
++
++ /* Reset frame pointer */
++ if (stack_adjustment > 0)
++ {
++ if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
++ {
++ fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
++ -stack_adjustment);
++ }
++ else
++ {
++ /* TODO! Is it safe to use r8 as scratch?? */
++ fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
++ -stack_adjustment);
++ fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
++ -stack_adjustment);
++ fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
++ }
++ }
++
++ if (saved_reg_mask)
++ {
++ /* Must pop used registers */
++
++ /* Should we use POPM or LDM? */
++ int usePOPM = TRUE;
++ if (((saved_reg_mask & (1 << 0)) ||
++ (saved_reg_mask & (1 << 1)) ||
++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
++ {
++ /* One of R0-R3 should at least be popped */
++ if (((saved_reg_mask & (1 << 0)) &&
++ (saved_reg_mask & (1 << 1)) &&
++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
++ {
++ /* All should be popped */
++ reglist8 |= 0x01;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
++
++ if (((saved_reg_mask & (1 << 4)) ||
++ (saved_reg_mask & (1 << 5)) ||
++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
++ {
++ /* One of R0-R3 should at least be popped */
++ if (((saved_reg_mask & (1 << 4)) &&
++ (saved_reg_mask & (1 << 5)) &&
++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
++ {
++ if (usePOPM)
++ /* All should be popped */
++ reglist8 |= 0x02;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
++
++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
++ {
++ /* One of R8-R9 should at least be pushed */
++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
++ {
++ if (usePOPM)
++ /* All should be pushed */
++ reglist8 |= 0x04;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
++
++ if (saved_reg_mask & (1 << 10))
++ reglist8 |= 0x08;
++
++ if (saved_reg_mask & (1 << 11))
++ reglist8 |= 0x10;
++
++ if (saved_reg_mask & (1 << 12))
++ reglist8 |= 0x20;
++
++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
++ /* Pop LR */
++ reglist8 |= 0x40;
++
++ if ((saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
++ && !IS_FLASHVAULT_IMPL (func_type))
++ /* Pop LR into PC. */
++ reglist8 |= 0x80;
++
++ if (usePOPM)
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_reglist8 (reglist8, (char *) reglist);
++
++ if (reglist8 & 0x80)
++ /* This instruction is also a return */
++ insert_ret = FALSE;
++
++ if (r12_imm && !insert_ret)
++ fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
++ else
++ fprintf (f, "\tpopm\t%s\n", reglist);
++
++ }
++ else
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
++ /* This instruction is also a return */
++ insert_ret = FALSE;
++
++ if (r12_imm && !insert_ret)
++ fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
++ INTVAL (r12_imm));
++ else
++ fprintf (f, "\tldm\tsp++, %s\n", reglist);
++
++ }
++
++ }
++
++ /* Stack adjustment for exception handler. */
++ if (crtl->calls_eh_return)
++ fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
++
++
++ if (IS_INTERRUPT (func_type))
++ {
++ fprintf (f, "\trete\n");
++ }
++ else if (IS_FLASHVAULT (func_type))
++ {
++ /* Normal return from Secure System call, increment SS_RAR before
++ returning. Use R8 as scratch. */
++ fprintf (f,
++ "\t# Normal return from sscall.\n"
++ "\t# Increment SS_RAR before returning.\n"
++ "\t# Use R8 as scratch.\n"
++ "\tmfsr\tr8, 440\n"
++ "\tsub\tr8, -2\n"
++ "\tmtsr\t440, r8\n"
++ "\tretss\n");
++ }
++ else if (insert_ret)
++ {
++ if (r12_imm)
++ fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
++ else
++ fprintf (f, "\tretal\tr12\n");
++ }
++}
++
++void
++avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
++{
++ int i;
++ bool first_reg = true;
++ /* Make sure reglist16_string is empty. */
++ reglist16_string[0] = '\0';
++
++ for (i = 0; i < 16; ++i)
++ {
++ if (reglist16_vect & (1 << i))
++ {
++ first_reg == true ? first_reg = false : strcat(reglist16_string,", ");
++ strcat (reglist16_string, reg_names[INTERNAL_REGNUM (i)]);
++ }
++ }
++}
++
++int
++avr32_convert_to_reglist16 (int reglist8_vect)
++{
++ int reglist16_vect = 0;
++ if (reglist8_vect & 0x1)
++ reglist16_vect |= 0xF;
++ if (reglist8_vect & 0x2)
++ reglist16_vect |= 0xF0;
++ if (reglist8_vect & 0x4)
++ reglist16_vect |= 0x300;
++ if (reglist8_vect & 0x8)
++ reglist16_vect |= 0x400;
++ if (reglist8_vect & 0x10)
++ reglist16_vect |= 0x800;
++ if (reglist8_vect & 0x20)
++ reglist16_vect |= 0x1000;
++ if (reglist8_vect & 0x40)
++ reglist16_vect |= 0x4000;
++ if (reglist8_vect & 0x80)
++ reglist16_vect |= 0x8000;
++
++ return reglist16_vect;
++}
++
++void
++avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
++{
++ /* Make sure reglist8_string is empty. */
++ reglist8_string[0] = '\0';
++
++ if (reglist8_vect & 0x1)
++ strcpy (reglist8_string, "r0-r3");
++ if (reglist8_vect & 0x2)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", r4-r7") :
++ strcpy (reglist8_string, "r4-r7");
++ if (reglist8_vect & 0x4)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", r8-r9") :
++ strcpy (reglist8_string, "r8-r9");
++ if (reglist8_vect & 0x8)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", r10") :
++ strcpy (reglist8_string, "r10");
++ if (reglist8_vect & 0x10)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", r11") :
++ strcpy (reglist8_string, "r11");
++ if (reglist8_vect & 0x20)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", r12") :
++ strcpy (reglist8_string, "r12");
++ if (reglist8_vect & 0x40)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", lr") :
++ strcpy (reglist8_string, "lr");
++ if (reglist8_vect & 0x80)
++ strlen (reglist8_string) ? strcat (reglist8_string, ", pc") :
++ strcpy (reglist8_string, "pc");
++}
++
++
++int
++avr32_eh_return_data_regno (int n)
++{
++ if (n >= 0 && n <= 3)
++ return 8 + n;
++ else
++ return INVALID_REGNUM;
++}
++
++
++/* Compute the distance from register FROM to register TO.
++ These can be the arg pointer, the frame pointer or
++ the stack pointer.
++ Typical stack layout looks like this:
++
++ old stack pointer -> | |
++ ----
++ | | \
++ | | saved arguments for
++ | | vararg functions
++ arg_pointer -> | | /
++ --
++ | | \
++ | | call saved
++ | | registers
++ | | /
++ frame ptr -> --
++ | | \
++ | | local
++ | | variables
++ stack ptr --> | | /
++ --
++ | | \
++ | | outgoing
++ | | arguments
++ | | /
++ --
++
++ For a given funciton some or all of these stack compomnents
++ may not be needed, giving rise to the possibility of
++ eliminating some of the registers.
++
++ The values returned by this function must reflect the behaviour
++ of avr32_expand_prologue() and avr32_compute_save_reg_mask().
++
++ The sign of the number returned reflects the direction of stack
++ growth, so the values are positive for all eliminations except
++ from the soft frame pointer to the hard frame pointer. */
++int
++avr32_initial_elimination_offset (int from, int to)
++{
++ int i;
++ int call_saved_regs = 0;
++ unsigned long saved_reg_mask;
++ unsigned int local_vars = get_frame_size ();
++
++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
++
++ for (i = 0; i < 16; ++i)
++ {
++ if (saved_reg_mask & (1 << i))
++ call_saved_regs += 4;
++ }
++
++ switch (from)
++ {
++ case ARG_POINTER_REGNUM:
++ switch (to)
++ {
++ case STACK_POINTER_REGNUM:
++ return call_saved_regs + local_vars;
++ case FRAME_POINTER_REGNUM:
++ return call_saved_regs;
++ default:
++ abort ();
++ }
++ case FRAME_POINTER_REGNUM:
++ switch (to)
++ {
++ case STACK_POINTER_REGNUM:
++ return local_vars;
++ default:
++ abort ();
++ }
++ default:
++ abort ();
++ }
++}
++
++
++/*
++ Returns a rtx used when passing the next argument to a function.
++ avr32_init_cumulative_args() and avr32_function_arg_advance() sets which
++ register to use.
++*/
++rtx
++avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
++ tree type, int named)
++{
++ int index = -1;
++ //unsigned long func_type = avr32_current_func_type ();
++ //int last_reg_index = (IS_FLASHVAULT(func_type) || IS_FLASHVAULT_IMPL(func_type) || cum->flashvault_func ? LAST_CUM_REG_INDEX - 1 : LAST_CUM_REG_INDEX);
++ int last_reg_index = (cum->flashvault_func ? LAST_CUM_REG_INDEX - 1 : LAST_CUM_REG_INDEX);
++
++ HOST_WIDE_INT arg_size, arg_rsize;
++ if (type)
++ {
++ arg_size = int_size_in_bytes (type);
++ }
++ else
++ {
++ arg_size = GET_MODE_SIZE (mode);
++ }
++ arg_rsize = PUSH_ROUNDING (arg_size);
++
++ /*
++ The last time this macro is called, it is called with mode == VOIDmode,
++ and its result is passed to the call or call_value pattern as operands 2
++ and 3 respectively. */
++ if (mode == VOIDmode)
++ {
++ return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
++ }
++
++ if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
++ {
++ return NULL_RTX;
++ }
++
++ if (arg_rsize == 8)
++ {
++ /* use r11:r10 or r9:r8. */
++ if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
++ index = 1;
++ else if ((last_reg_index == 4) &&
++ !(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
++ index = 3;
++ else
++ index = -1;
++ }
++ else if (arg_rsize == 4)
++ { /* Use first available register */
++ index = 0;
++ while (index <= last_reg_index && GET_USED_INDEX (cum, index))
++ index++;
++ if (index > last_reg_index)
++ index = -1;
++ }
++
++ SET_REG_INDEX (cum, index);
++
++ if (GET_REG_INDEX (cum) >= 0)
++ return gen_rtx_REG (mode, avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
++
++ return NULL_RTX;
++}
++
++
++/* Set the register used for passing the first argument to a function. */
++void
++avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
++ tree fntype ATTRIBUTE_UNUSED,
++ rtx libname ATTRIBUTE_UNUSED,
++ tree fndecl)
++{
++ /* Set all registers as unused. */
++ SET_INDEXES_UNUSED (cum);
++
++ /* Reset uses_anonymous_args */
++ cum->uses_anonymous_args = 0;
++
++ /* Reset size of stack pushed arguments */
++ cum->stack_pushed_args_size = 0;
++
++ cum->flashvault_func = (fndecl && (has_attribute_p (fndecl,"flashvault") || has_attribute_p (fndecl,"flashvault_impl")));
++}
++
++
++/*
++ Set register used for passing the next argument to a function. Only the
++ Scratch Registers are used.
++
++ number name
++ 15 r15 PC
++ 14 r14 LR
++ 13 r13 _SP_________
++ FIRST_CUM_REG 12 r12 _||_
++ 10 r11 ||
++ 11 r10 _||_ Scratch Registers
++ 8 r9 ||
++ LAST_SCRATCH_REG 9 r8 _\/_________
++ 6 r7 /\
++ 7 r6 ||
++ 4 r5 ||
++ 5 r4 ||
++ 2 r3 ||
++ 3 r2 ||
++ 0 r1 ||
++ 1 r0 _||_________
++
++*/
++void
++avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
++ tree type, int named ATTRIBUTE_UNUSED)
++{
++ HOST_WIDE_INT arg_size, arg_rsize;
++
++ if (type)
++ {
++ arg_size = int_size_in_bytes (type);
++ }
++ else
++ {
++ arg_size = GET_MODE_SIZE (mode);
++ }
++ arg_rsize = PUSH_ROUNDING (arg_size);
++
++ /* If the argument had to be passed in stack, no register is used. */
++ if ((*targetm.calls.must_pass_in_stack) (mode, type))
++ {
++ cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
++ return;
++ }
++
++ /* Mark the used registers as "used". */
++ if (GET_REG_INDEX (cum) >= 0)
++ {
++ SET_USED_INDEX (cum, GET_REG_INDEX (cum));
++ if (arg_rsize == 8)
++ {
++ SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
++ }
++ }
++ else
++ {
++ /* Had to use stack */
++ cum->stack_pushed_args_size += arg_rsize;
++ }
++}
++
++
++/*
++ Defines witch direction to go to find the next register to use if the
++ argument is larger then one register or for arguments shorter than an
++ int which is not promoted, such as the last part of structures with
++ size not a multiple of 4. */
++enum direction
++avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type)
++{
++ /* Pad upward for all aggregates except byte and halfword sized aggregates
++ which can be passed in registers. */
++ if (type
++ && AGGREGATE_TYPE_P (type)
++ && (int_size_in_bytes (type) != 1)
++ && !((int_size_in_bytes (type) == 2)
++ && TYPE_ALIGN_UNIT (type) >= 2)
++ && (int_size_in_bytes (type) & 0x3))
++ {
++ return upward;
++ }
++
++ return downward;
++}
++
++
++/* Return a rtx used for the return value from a function call. */
++rtx
++avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
++{
++ if (avr32_return_in_memory (type, func))
++ return NULL_RTX;
++
++ if (int_size_in_bytes (type) <= 4)
++ {
++ enum machine_mode mode = TYPE_MODE (type);
++ int unsignedp = 0;
++ PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
++ return gen_rtx_REG (mode, RET_REGISTER);
++ }
++ else if (int_size_in_bytes (type) <= 8)
++ return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
++
++ return NULL_RTX;
++}
++
++
++/* Return a rtx used for the return value from a library function call. */
++rtx
++avr32_libcall_value (enum machine_mode mode)
++{
++
++ if (GET_MODE_SIZE (mode) <= 4)
++ return gen_rtx_REG (mode, RET_REGISTER);
++ else if (GET_MODE_SIZE (mode) <= 8)
++ return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
++ else
++ return NULL_RTX;
++}
++
++
++/* Return TRUE if X references a SYMBOL_REF. */
++int
++symbol_mentioned_p (rtx x)
++{
++ const char *fmt;
++ int i;
++
++ if (GET_CODE (x) == SYMBOL_REF)
++ return 1;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (symbol_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
++
++ return 0;
++}
++
++
++/* Return TRUE if X references a LABEL_REF. */
++int
++label_mentioned_p (rtx x)
++{
++ const char *fmt;
++ int i;
++
++ if (GET_CODE (x) == LABEL_REF)
++ return 1;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (label_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
++
++ return 0;
++}
++
++
++/* Return TRUE if X contains a MEM expression. */
++int
++mem_mentioned_p (rtx x)
++{
++ const char *fmt;
++ int i;
++
++ if (MEM_P (x))
++ return 1;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (mem_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
++
++ return 0;
++}
++
++
++int
++avr32_legitimate_pic_operand_p (rtx x)
++{
++
++ /* We can't have const, this must be broken down to a symbol. */
++ if (GET_CODE (x) == CONST)
++ return FALSE;
++
++ /* Can't access symbols or labels via the constant pool either */
++ if ((GET_CODE (x) == SYMBOL_REF
++ && CONSTANT_POOL_ADDRESS_P (x)
++ && (symbol_mentioned_p (get_pool_constant (x))
++ || label_mentioned_p (get_pool_constant (x)))))
++ return FALSE;
++
++ return TRUE;
++}
++
++
++rtx
++legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
++ rtx reg)
++{
++
++ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
++ {
++ int subregs = 0;
++
++ if (reg == 0)
++ {
++ if (!can_create_pseudo_p ())
++ abort ();
++ else
++ reg = gen_reg_rtx (Pmode);
++
++ subregs = 1;
++ }
++
++ emit_move_insn (reg, orig);
++
++ /* Only set current function as using pic offset table if flag_pic is
++ set. This is because this function is also used if
++ TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
++ if (flag_pic)
++ crtl->uses_pic_offset_table = 1;
++
++ /* Put a REG_EQUAL note on this insn, so that it can be optimized by
++ loop. */
++ return reg;
++ }
++ else if (GET_CODE (orig) == CONST)
++ {
++ rtx base, offset;
++
++ if (flag_pic
++ && GET_CODE (XEXP (orig, 0)) == PLUS
++ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
++ return orig;
++
++ if (reg == 0)
++ {
++ if (!can_create_pseudo_p ())
++ abort ();
++ else
++ reg = gen_reg_rtx (Pmode);
++ }
++
++ if (GET_CODE (XEXP (orig, 0)) == PLUS)
++ {
++ base =
++ legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
++ offset =
++ legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
++ base == reg ? 0 : reg);
++ }
++ else
++ abort ();
++
++ if (GET_CODE (offset) == CONST_INT)
++ {
++ /* The base register doesn't really matter, we only want to test
++ the index for the appropriate mode. */
++ if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
++ {
++ if (can_create_pseudo_p ())
++ offset = force_reg (Pmode, offset);
++ else
++ abort ();
++ }
++
++ if (GET_CODE (offset) == CONST_INT)
++ return plus_constant (base, INTVAL (offset));
++ }
++
++ return gen_rtx_PLUS (Pmode, base, offset);
++ }
++
++ return orig;
++}
++
++
++/* Generate code to load the PIC register. */
++void
++avr32_load_pic_register (void)
++{
++ rtx l1, pic_tmp;
++ rtx global_offset_table;
++
++ if ((crtl->uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
++ return;
++
++ if (!flag_pic)
++ abort ();
++
++ l1 = gen_label_rtx ();
++
++ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
++ pic_tmp =
++ gen_rtx_CONST (Pmode,
++ gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
++ global_offset_table));
++ emit_insn (gen_pic_load_addr
++ (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
++ emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
++
++ /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
++ can cause life info to screw up. */
++ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
++}
++
++
++/* This hook should return true if values of type type are returned at the most
++ significant end of a register (in other words, if they are padded at the
++ least significant end). You can assume that type is returned in a register;
++ the caller is required to check this. Note that the register provided by
++ FUNCTION_VALUE must be able to hold the complete return value. For example,
++ if a 1-, 2- or 3-byte structure is returned at the most significant end of a
++ 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
++bool
++avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
++{
++ /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
++ ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
++ false; else return true; */
++
++ return false;
++}
++
++
++/*
++ Returns one if a certain function value is going to be returned in memory
++ and zero if it is going to be returned in a register.
++
++ BLKmode and all other modes that is larger than 64 bits are returned in
++ memory.
++*/
++bool
++avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
++{
++ if (TYPE_MODE (type) == VOIDmode)
++ return false;
++
++ if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
++ || int_size_in_bytes (type) == -1)
++ {
++ return true;
++ }
++
++ /* If we have an aggregate then use the same mechanism as when checking if
++ it should be passed on the stack. */
++ if (type
++ && AGGREGATE_TYPE_P (type)
++ && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
++ return true;
++
++ return false;
++}
++
++
++/* Output the constant part of the trampoline.
++ lddpc r0, pc[0x8:e] ; load static chain register
++ lddpc pc, pc[0x8:e] ; jump to subrutine
++ .long 0 ; Address to static chain,
++ ; filled in by avr32_initialize_trampoline()
++ .long 0 ; Address to subrutine,
++ ; filled in by avr32_initialize_trampoline()
++*/
++void
++avr32_trampoline_template (FILE * file)
++{
++ fprintf (file, "\tlddpc r0, pc[8]\n");
++ fprintf (file, "\tlddpc pc, pc[8]\n");
++ /* make room for the address of the static chain. */
++ fprintf (file, "\t.long\t0\n");
++ /* make room for the address to the subrutine. */
++ fprintf (file, "\t.long\t0\n");
++}
++
++
++/* Initialize the variable parts of a trampoline. */
++void
++avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
++{
++ /* Store the address to the static chain. */
++ emit_move_insn (gen_rtx_MEM
++ (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
++ static_chain);
++
++ /* Store the address to the function. */
++ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
++ fnaddr);
++
++ emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
++ gen_rtx_CONST_INT (SImode,
++ AVR32_CACHE_INVALIDATE_ICACHE)));
++}
++
++
++/* Return nonzero if X is valid as an addressing register. */
++int
++avr32_address_register_rtx_p (rtx x, int strict_p)
++{
++ int regno;
++
++ if (!register_operand(x, GET_MODE(x)))
++ return 0;
++
++ /* If strict we require the register to be a hard register. */
++ if (strict_p
++ && !REG_P(x))
++ return 0;
++
++ regno = REGNO (x);
++
++ if (strict_p)
++ return REGNO_OK_FOR_BASE_P (regno);
++
++ return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
++}
++
++
++/* Return nonzero if INDEX is valid for an address index operand. */
++int
++avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
++{
++ enum rtx_code code = GET_CODE (index);
++
++ if (GET_MODE_SIZE (mode) > 8)
++ return 0;
++
++ /* Standard coprocessor addressing modes. */
++ if (code == CONST_INT)
++ {
++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
++ }
++
++ if (avr32_address_register_rtx_p (index, strict_p))
++ return 1;
++
++ if (code == MULT)
++ {
++ rtx xiop0 = XEXP (index, 0);
++ rtx xiop1 = XEXP (index, 1);
++ return ((avr32_address_register_rtx_p (xiop0, strict_p)
++ && power_of_two_operand (xiop1, SImode)
++ && (INTVAL (xiop1) <= 8))
++ || (avr32_address_register_rtx_p (xiop1, strict_p)
++ && power_of_two_operand (xiop0, SImode)
++ && (INTVAL (xiop0) <= 8)));
++ }
++ else if (code == ASHIFT)
++ {
++ rtx op = XEXP (index, 1);
++
++ return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
++ && GET_CODE (op) == CONST_INT
++ && INTVAL (op) > 0 && INTVAL (op) <= 3);
++ }
++
++ return 0;
++}
++
++
++/*
++ Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
++ the RTX x is a legitimate memory address.
++
++ Returns NO_REGS if the address is not legatime, GENERAL_REGS or ALL_REGS
++ if it is.
++*/
++
++
++/* Forward declaration */
++int is_minipool_label (rtx label);
++
++int
++avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
++{
++
++ switch (GET_CODE (x))
++ {
++ case REG:
++ return avr32_address_register_rtx_p (x, strict);
++ case CONST_INT:
++ return ((mode==SImode) && TARGET_RMW_ADDRESSABLE_DATA
++ && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17"));
++ case CONST:
++ {
++ rtx label = avr32_find_symbol (x);
++ if (label
++ &&
++ (/*
++ If we enable (const (plus (symbol_ref ...))) type constant
++ pool entries we must add support for it in the predicates and
++ in the minipool generation in avr32_reorg().
++ (CONSTANT_POOL_ADDRESS_P (label)
++ && !(flag_pic
++ && (symbol_mentioned_p (get_pool_constant (label))
++ || label_mentioned_p (get_pool_constant (label)))))
++ ||*/
++ ((GET_CODE (label) == LABEL_REF)
++ && GET_CODE (XEXP (label, 0)) == CODE_LABEL
++ && is_minipool_label (XEXP (label, 0)))
++ /*|| ((GET_CODE (label) == SYMBOL_REF)
++ && mode == SImode
++ && SYMBOL_REF_RMW_ADDR(label))*/))
++ {
++ return TRUE;
++ }
++ }
++ break;
++ case LABEL_REF:
++ if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
++ && is_minipool_label (XEXP (x, 0)))
++ {
++ return TRUE;
++ }
++ break;
++ case SYMBOL_REF:
++ {
++ if (CONSTANT_POOL_ADDRESS_P (x)
++ && !(flag_pic
++ && (symbol_mentioned_p (get_pool_constant (x))
++ || label_mentioned_p (get_pool_constant (x)))))
++ return TRUE;
++ else if (SYMBOL_REF_RCALL_FUNCTION_P (x)
++ || (mode == SImode
++ && SYMBOL_REF_RMW_ADDR (x)))
++ return TRUE;
++ break;
++ }
++ case PRE_DEC: /* (pre_dec (...)) */
++ case POST_INC: /* (post_inc (...)) */
++ return avr32_address_register_rtx_p (XEXP (x, 0), strict);
++ case PLUS: /* (plus (...) (...)) */
++ {
++ rtx xop0 = XEXP (x, 0);
++ rtx xop1 = XEXP (x, 1);
++
++ return ((avr32_address_register_rtx_p (xop0, strict)
++ && avr32_legitimate_index_p (mode, xop1, strict))
++ || (avr32_address_register_rtx_p (xop1, strict)
++ && avr32_legitimate_index_p (mode, xop0, strict)));
++ }
++ default:
++ break;
++ }
++
++ return FALSE;
++}
++
++
++int
++avr32_const_ok_for_move (HOST_WIDE_INT c)
++{
++ if ( TARGET_V2_INSNS )
++ return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
++ /* movh instruction */
++ || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
++ else
++ return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
++}
++
++
++int
++avr32_const_double_immediate (rtx value)
++{
++ HOST_WIDE_INT hi, lo;
++
++ if (GET_CODE (value) != CONST_DOUBLE)
++ return FALSE;
++
++ if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
++ {
++ HOST_WIDE_INT target_float[2];
++ hi = lo = 0;
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
++ GET_MODE (value));
++ lo = target_float[0];
++ hi = target_float[1];
++ }
++ else
++ {
++ hi = CONST_DOUBLE_HIGH (value);
++ lo = CONST_DOUBLE_LOW (value);
++ }
++
++ if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
++ && (GET_MODE (value) == SFmode
++ || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++
++int
++avr32_legitimate_constant_p (rtx x)
++{
++ switch (GET_CODE (x))
++ {
++ case CONST_INT:
++ /* Check if we should put large immediate into constant pool
++ or load them directly with mov/orh.*/
++ if (!avr32_imm_in_const_pool)
++ return 1;
++
++ return avr32_const_ok_for_move (INTVAL (x));
++ case CONST_DOUBLE:
++ /* Check if we should put large immediate into constant pool
++ or load them directly with mov/orh.*/
++ if (!avr32_imm_in_const_pool)
++ return 1;
++
++ if (GET_MODE (x) == SFmode
++ || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
++ return avr32_const_double_immediate (x);
++ else
++ return 0;
++ case LABEL_REF:
++ case SYMBOL_REF:
++ return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS);
++ case CONST:
++ case HIGH:
++ case CONST_VECTOR:
++ return 0;
++ default:
++ printf ("%s():\n", __FUNCTION__);
++ debug_rtx (x);
++ return 1;
++ }
++}
++
++
++/* Strip any special encoding from labels */
++const char *
++avr32_strip_name_encoding (const char *name)
++{
++ const char *stripped = name;
++
++ while (1)
++ {
++ switch (stripped[0])
++ {
++ case '#':
++ stripped = strchr (name + 1, '#') + 1;
++ break;
++ case '*':
++ stripped = &stripped[1];
++ break;
++ default:
++ return stripped;
++ }
++ }
++}
++
++
++
++/* Do anything needed before RTL is emitted for each function. */
++static struct machine_function *
++avr32_init_machine_status (void)
++{
++ struct machine_function *machine;
++ machine =
++ (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
++
++#if AVR32_FT_UNKNOWN != 0
++ machine->func_type = AVR32_FT_UNKNOWN;
++#endif
++
++ machine->minipool_label_head = 0;
++ machine->minipool_label_tail = 0;
++ machine->ifcvt_after_reload = 0;
++ return machine;
++}
++
++
++void
++avr32_init_expanders (void)
++{
++ /* Arrange to initialize and mark the machine per-function status. */
++ init_machine_status = avr32_init_machine_status;
++}
++
++
++/* Return an RTX indicating where the return address to the
++ calling function can be found. */
++rtx
++avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
++{
++ if (count != 0)
++ return NULL_RTX;
++
++ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
++}
++
++
++void
++avr32_encode_section_info (tree decl, rtx rtl, int first)
++{
++ default_encode_section_info(decl, rtl, first);
++
++ if ( TREE_CODE (decl) == VAR_DECL
++ && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
++ && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl))
++ || TARGET_RMW_ADDRESSABLE_DATA) ){
++ if ( !TARGET_RMW || flag_pic )
++ return;
++ // {
++ // warning ("Using RMW addressable data with an arch that does not support RMW instructions.");
++ // return;
++ // }
++ //
++ //if ( flag_pic )
++ // {
++ // warning ("Using RMW addressable data with together with -fpic switch. Can not use RMW instruction when compiling with -fpic.");
++ // return;
++ // }
++ SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
++ }
++}
++
++
++void
++avr32_asm_output_label (FILE * stream, const char *name)
++{
++ name = avr32_strip_name_encoding (name);
++
++ /* Print the label. */
++ assemble_name (stream, name);
++ fprintf (stream, ":\n");
++}
++
++
++void
++avr32_asm_weaken_label (FILE * stream, const char *name)
++{
++ fprintf (stream, "\t.weak ");
++ assemble_name (stream, name);
++ fprintf (stream, "\n");
++}
++
++
++/*
++ Checks if a labelref is equal to a reserved word in the assembler. If it is,
++ insert a '_' before the label name.
++*/
++void
++avr32_asm_output_labelref (FILE * stream, const char *name)
++{
++ int verbatim = FALSE;
++ const char *stripped = name;
++ int strip_finished = FALSE;
++
++ while (!strip_finished)
++ {
++ switch (stripped[0])
++ {
++ case '#':
++ stripped = strchr (name + 1, '#') + 1;
++ break;
++ case '*':
++ stripped = &stripped[1];
++ verbatim = TRUE;
++ break;
++ default:
++ strip_finished = TRUE;
++ break;
++ }
++ }
++
++ if (verbatim)
++ fputs (stripped, stream);
++ else
++ asm_fprintf (stream, "%U%s", stripped);
++}
++
++
++/*
++ Check if the comparison in compare_exp is redundant
++ for the condition given in next_cond given that the
++ needed flags are already set by an earlier instruction.
++ Uses cc_prev_status to check this.
++
++ Returns NULL_RTX if the compare is not redundant
++ or the new condition to use in the conditional
++ instruction if the compare is redundant.
++*/
++static rtx
++is_compare_redundant (rtx compare_exp, rtx next_cond)
++{
++ int z_flag_valid = FALSE;
++ int n_flag_valid = FALSE;
++ rtx new_cond;
++
++ if (GET_CODE (compare_exp) != COMPARE
++ && GET_CODE (compare_exp) != AND)
++ return NULL_RTX;
++
++
++ if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
++ {
++ /* cc0 already contains the correct comparison -> delete cmp insn */
++ return next_cond;
++ }
++
++ if (GET_MODE (compare_exp) != SImode)
++ return NULL_RTX;
++
++ switch (cc_prev_status.mdep.flags)
++ {
++ case CC_SET_VNCZ:
++ case CC_SET_NCZ:
++ n_flag_valid = TRUE;
++ case CC_SET_CZ:
++ case CC_SET_Z:
++ z_flag_valid = TRUE;
++ }
++
++ if (cc_prev_status.mdep.value
++ && GET_CODE (compare_exp) == COMPARE
++ && REG_P (XEXP (compare_exp, 0))
++ && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
++ && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
++ && next_cond != NULL_RTX)
++ {
++ if (INTVAL (XEXP (compare_exp, 1)) == 0
++ && z_flag_valid
++ && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
++ /* We can skip comparison Z flag is already reflecting ops[0] */
++ return next_cond;
++ else if (n_flag_valid
++ && ((INTVAL (XEXP (compare_exp, 1)) == 0
++ && (GET_CODE (next_cond) == GE
++ || GET_CODE (next_cond) == LT))
++ || (INTVAL (XEXP (compare_exp, 1)) == -1
++ && (GET_CODE (next_cond) == GT
++ || GET_CODE (next_cond) == LE))))
++ {
++ /* We can skip comparison N flag is already reflecting ops[0],
++ which means that we can use the mi/pl conditions to check if
++ ops[0] is GE or LT 0. */
++ if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
++ new_cond =
++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
++ UNSPEC_COND_PL);
++ else
++ new_cond =
++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
++ UNSPEC_COND_MI);
++ return new_cond;
++ }
++ }
++ return NULL_RTX;
++}
++
++
++/* Updates cc_status. */
++void
++avr32_notice_update_cc (rtx exp, rtx insn)
++{
++ enum attr_cc attr_cc = get_attr_cc (insn);
++
++ if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
++ {
++ if (TARGET_V2_INSNS)
++ attr_cc = CC_NONE;
++ else
++ attr_cc = CC_SET_Z;
++ }
++
++ switch (attr_cc)
++ {
++ case CC_CALL_SET:
++ CC_STATUS_INIT;
++ /* Check if the function call returns a value in r12 */
++ if (REG_P (recog_data.operand[0])
++ && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
++ {
++ cc_status.flags = 0;
++ cc_status.mdep.value =
++ gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++
++ }
++ break;
++ case CC_COMPARE:
++ {
++ /* Check that compare will not be optimized away if so nothing should
++ be done */
++ rtx compare_exp = SET_SRC (exp);
++ /* Check if we have a tst expression. If so convert it to a
++ compare with 0. */
++ if ( REG_P (SET_SRC (exp)) )
++ compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
++ SET_SRC (exp),
++ const0_rtx);
++
++ if (!next_insn_emits_cmp (insn)
++ && (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) == NULL_RTX))
++ {
++
++ /* Reset the nonstandard flag */
++ CC_STATUS_INIT;
++ cc_status.flags = 0;
++ cc_status.mdep.value = compare_exp;
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ }
++ }
++ break;
++ case CC_CMP_COND_INSN:
++ {
++ /* Conditional insn that emit the compare itself. */
++ rtx cmp;
++ rtx cmp_op0, cmp_op1;
++ rtx cond;
++ rtx dest;
++ rtx next_insn = next_nonnote_insn (insn);
++
++ if ( GET_CODE (exp) == COND_EXEC )
++ {
++ cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
++ cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
++ cond = COND_EXEC_TEST (exp);
++ dest = SET_DEST (COND_EXEC_CODE (exp));
++ }
++ else
++ {
++ /* If then else conditional. compare operands are in operands
++ 4 and 5. */
++ cmp_op0 = recog_data.operand[4];
++ cmp_op1 = recog_data.operand[5];
++ cond = recog_data.operand[1];
++ dest = SET_DEST (exp);
++ }
++
++ if ( GET_CODE (cmp_op0) == AND )
++ cmp = cmp_op0;
++ else
++ cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
++ cmp_op0,
++ cmp_op1);
++
++ /* Check if the conditional insns updates a register present
++ in the comparison, if so then we must reset the cc_status. */
++ if (REG_P (dest)
++ && (reg_mentioned_p (dest, cmp_op0)
++ || reg_mentioned_p (dest, cmp_op1))
++ && GET_CODE (exp) != COND_EXEC )
++ {
++ CC_STATUS_INIT;
++ }
++ else if (is_compare_redundant (cmp, cond) == NULL_RTX)
++ {
++ /* Reset the nonstandard flag */
++ CC_STATUS_INIT;
++ if ( GET_CODE (cmp_op0) == AND )
++ {
++ cc_status.flags = CC_INVERTED;
++ cc_status.mdep.flags = CC_SET_Z;
++ }
++ else
++ {
++ cc_status.flags = 0;
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ }
++ cc_status.mdep.value = cmp;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ }
++
++
++ /* Check if we have a COND_EXEC insn which updates one
++ of the registers in the compare status. */
++ if (REG_P (dest)
++ && (reg_mentioned_p (dest, cmp_op0)
++ || reg_mentioned_p (dest, cmp_op1))
++ && GET_CODE (exp) == COND_EXEC )
++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
++
++ if ( cc_status.mdep.cond_exec_cmp_clobbered
++ && GET_CODE (exp) == COND_EXEC
++ && next_insn != NULL
++ && INSN_P (next_insn)
++ && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
++ && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
++ || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
++ {
++ /* We have a sequence of conditional insns where the compare status has been clobbered
++ since the compare no longer reflects the content of the values to compare. */
++ CC_STATUS_INIT;
++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
++ }
++
++ }
++ break;
++ case CC_BLD:
++ /* Bit load is kind of like an inverted testsi, because the Z flag is
++ inverted */
++ CC_STATUS_INIT;
++ cc_status.flags = CC_INVERTED;
++ cc_status.mdep.value = SET_SRC (exp);
++ cc_status.mdep.flags = CC_SET_Z;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ break;
++ case CC_NONE:
++ /* Insn does not affect CC at all. Check if the instruction updates
++ some of the register currently reflected in cc0 */
++
++ if ((GET_CODE (exp) == SET)
++ && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
++ && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
++ || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
++ || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
++ {
++ CC_STATUS_INIT;
++ }
++
++ /* If this is a parallel we must step through each of the parallel
++ expressions */
++ if (GET_CODE (exp) == PARALLEL)
++ {
++ int i;
++ for (i = 0; i < XVECLEN (exp, 0); ++i)
++ {
++ rtx vec_exp = XVECEXP (exp, 0, i);
++ if ((GET_CODE (vec_exp) == SET)
++ && (cc_status.value1 || cc_status.value2
++ || cc_status.mdep.value)
++ && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
++ || reg_mentioned_p (SET_DEST (vec_exp),
++ cc_status.value2)
++ || reg_mentioned_p (SET_DEST (vec_exp),
++ cc_status.mdep.value)))
++ {
++ CC_STATUS_INIT;
++ }
++ }
++ }
++
++ /* Check if we have memory opartions with post_inc or pre_dec on the
++ register currently reflected in cc0 */
++ if (GET_CODE (exp) == SET
++ && GET_CODE (SET_SRC (exp)) == MEM
++ && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
++ || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
++ &&
++ (reg_mentioned_p
++ (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
++ cc_status.value2)
++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
++ cc_status.mdep.value)))
++ CC_STATUS_INIT;
++
++ if (GET_CODE (exp) == SET
++ && GET_CODE (SET_DEST (exp)) == MEM
++ && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
++ || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
++ &&
++ (reg_mentioned_p
++ (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
++ cc_status.value2)
++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
++ cc_status.mdep.value)))
++ CC_STATUS_INIT;
++ break;
++
++ case CC_SET_VNCZ:
++ CC_STATUS_INIT;
++ cc_status.mdep.value = recog_data.operand[0];
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ break;
++
++ case CC_SET_NCZ:
++ CC_STATUS_INIT;
++ cc_status.mdep.value = recog_data.operand[0];
++ cc_status.mdep.flags = CC_SET_NCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ break;
++
++ case CC_SET_CZ:
++ CC_STATUS_INIT;
++ cc_status.mdep.value = recog_data.operand[0];
++ cc_status.mdep.flags = CC_SET_CZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ break;
++
++ case CC_SET_Z:
++ CC_STATUS_INIT;
++ cc_status.mdep.value = recog_data.operand[0];
++ cc_status.mdep.flags = CC_SET_Z;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ break;
++
++ case CC_CLOBBER:
++ CC_STATUS_INIT;
++ break;
++
++ default:
++ CC_STATUS_INIT;
++ }
++}
++
++
++/*
++ Outputs to stdio stream stream the assembler syntax for an instruction
++ operand x. x is an RTL expression.
++*/
++void
++avr32_print_operand (FILE * stream, rtx x, int code)
++{
++ int error = 0;
++
++ if ( code == '?' )
++ {
++ /* Predicable instruction, print condition code */
++
++ /* If the insn should not be conditional then do nothing. */
++ if ( current_insn_predicate == NULL_RTX )
++ return;
++
++ /* Set x to the predicate to force printing
++ the condition later on. */
++ x = current_insn_predicate;
++
++ /* Reverse condition if useing bld insn. */
++ if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
++ x = reversed_condition (current_insn_predicate);
++ }
++ else if ( code == '!' )
++ {
++ /* Output compare for conditional insn if needed. */
++ rtx new_cond;
++ gcc_assert ( current_insn_predicate != NULL_RTX );
++ new_cond = avr32_output_cmp(current_insn_predicate,
++ GET_MODE(XEXP(current_insn_predicate,0)),
++ XEXP(current_insn_predicate,0),
++ XEXP(current_insn_predicate,1));
++
++ /* Check if the new condition is a special avr32 condition
++ specified using UNSPECs. If so we must handle it differently. */
++ if ( GET_CODE (new_cond) == UNSPEC )
++ {
++ current_insn_predicate =
++ gen_rtx_UNSPEC (CCmode,
++ gen_rtvec (2,
++ XEXP(current_insn_predicate,0),
++ XEXP(current_insn_predicate,1)),
++ XINT (new_cond, 1));
++ }
++ else
++ {
++ PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
++ }
++ return;
++ }
++
++ switch (GET_CODE (x))
++ {
++ case UNSPEC:
++ switch (XINT (x, 1))
++ {
++ case UNSPEC_COND_PL:
++ if (code == 'i')
++ fputs ("mi", stream);
++ else
++ fputs ("pl", stream);
++ break;
++ case UNSPEC_COND_MI:
++ if (code == 'i')
++ fputs ("pl", stream);
++ else
++ fputs ("mi", stream);
++ break;
++ default:
++ error = 1;
++ }
++ break;
++ case EQ:
++ if (code == 'i')
++ fputs ("ne", stream);
++ else
++ fputs ("eq", stream);
++ break;
++ case NE:
++ if (code == 'i')
++ fputs ("eq", stream);
++ else
++ fputs ("ne", stream);
++ break;
++ case GT:
++ if (code == 'i')
++ fputs ("le", stream);
++ else
++ fputs ("gt", stream);
++ break;
++ case GTU:
++ if (code == 'i')
++ fputs ("ls", stream);
++ else
++ fputs ("hi", stream);
++ break;
++ case LT:
++ if (code == 'i')
++ fputs ("ge", stream);
++ else
++ fputs ("lt", stream);
++ break;
++ case LTU:
++ if (code == 'i')
++ fputs ("hs", stream);
++ else
++ fputs ("lo", stream);
++ break;
++ case GE:
++ if (code == 'i')
++ fputs ("lt", stream);
++ else
++ fputs ("ge", stream);
++ break;
++ case GEU:
++ if (code == 'i')
++ fputs ("lo", stream);
++ else
++ fputs ("hs", stream);
++ break;
++ case LE:
++ if (code == 'i')
++ fputs ("gt", stream);
++ else
++ fputs ("le", stream);
++ break;
++ case LEU:
++ if (code == 'i')
++ fputs ("hi", stream);
++ else
++ fputs ("ls", stream);
++ break;
++ case CONST_INT:
++ {
++ HOST_WIDE_INT value = INTVAL (x);
++
++ switch (code)
++ {
++ case 'm':
++ if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
++ {
++ /* A const_int can be used to represent DImode constants. */
++ value >>= BITS_PER_WORD;
++ }
++ /* We might get a const_int immediate for setting a DI register,
++ we then must then return the correct sign extended DI. The most
++ significant word is just a sign extension. */
++ else if (value < 0)
++ value = -1;
++ else
++ value = 0;
++ break;
++ case 'i':
++ value++;
++ break;
++ case 'p':
++ {
++ /* Set to bit position of first bit set in immediate */
++ int i, bitpos = 32;
++ for (i = 0; i < 32; i++)
++ if (value & (1 << i))
++ {
++ bitpos = i;
++ break;
++ }
++ value = bitpos;
++ }
++ break;
++ case 'z':
++ {
++ /* Set to bit position of first bit cleared in immediate */
++ int i, bitpos = 32;
++ for (i = 0; i < 32; i++)
++ if (!(value & (1 << i)))
++ {
++ bitpos = i;
++ break;
++ }
++ value = bitpos;
++ }
++ break;
++ case 'r':
++ {
++ /* Reglist 8 */
++ char op[50];
++ op[0] = '\0';
++
++ if (value & 0x01)
++ strcpy (op, "r0-r3");
++ if (value & 0x02)
++ strlen (op) ? strcat (op, ", r4-r7") : strcpy (op,"r4-r7");
++ if (value & 0x04)
++ strlen (op) ? strcat (op, ", r8-r9") : strcpy (op,"r8-r9");
++ if (value & 0x08)
++ strlen (op) ? strcat (op, ", r10") : strcpy (op,"r10");
++ if (value & 0x10)
++ strlen (op) ? strcat (op, ", r11") : strcpy (op,"r11");
++ if (value & 0x20)
++ strlen (op) ? strcat (op, ", r12") : strcpy (op,"r12");
++ if (value & 0x40)
++ strlen (op) ? strcat (op, ", lr") : strcpy (op, "lr");
++ if (value & 0x80)
++ strlen (op) ? strcat (op, ", pc") : strcpy (op, "pc");
++
++ fputs (op, stream);
++ return;
++ }
++ case 's':
++ {
++ /* Reglist 16 */
++ char reglist16_string[100];
++ int i;
++ bool first_reg = true;
++ reglist16_string[0] = '\0';
++
++ for (i = 0; i < 16; ++i)
++ {
++ if (value & (1 << i))
++ {
++ first_reg == true ? first_reg = false : strcat(reglist16_string,", ");
++ strcat(reglist16_string,reg_names[INTERNAL_REGNUM(i)]);
++ }
++ }
++ fputs (reglist16_string, stream);
++ return;
++ }
++ case 'h':
++ /* Print halfword part of word */
++ fputs (value ? "b" : "t", stream);
++ return;
++ }
++
++ /* Print Value */
++ fprintf (stream, "%d", value);
++ break;
++ }
++ case CONST_DOUBLE:
++ {
++ HOST_WIDE_INT hi, lo;
++ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
++ {
++ HOST_WIDE_INT target_float[2];
++ hi = lo = 0;
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
++ GET_MODE (x));
++ /* For doubles the most significant part starts at index 0. */
++ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
++ {
++ hi = target_float[0];
++ lo = target_float[1];
++ }
++ else
++ {
++ lo = target_float[0];
++ }
++ }
++ else
++ {
++ hi = CONST_DOUBLE_HIGH (x);
++ lo = CONST_DOUBLE_LOW (x);
++ }
++
++ if (code == 'm')
++ fprintf (stream, "%ld", hi);
++ else
++ fprintf (stream, "%ld", lo);
++
++ break;
++ }
++ case CONST:
++ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
++ fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
++ break;
++ case REG:
++ /* Swap register name if the register is DImode or DFmode. */
++ if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
++ {
++ /* Double register must have an even numbered address */
++ gcc_assert (!(REGNO (x) % 2));
++ if (code == 'm')
++ fputs (reg_names[true_regnum (x)], stream);
++ else
++ fputs (reg_names[true_regnum (x) + 1], stream);
++ }
++ else if (GET_MODE (x) == TImode)
++ {
++ switch (code)
++ {
++ case 'T':
++ fputs (reg_names[true_regnum (x)], stream);
++ break;
++ case 'U':
++ fputs (reg_names[true_regnum (x) + 1], stream);
++ break;
++ case 'L':
++ fputs (reg_names[true_regnum (x) + 2], stream);
++ break;
++ case 'B':
++ fputs (reg_names[true_regnum (x) + 3], stream);
++ break;
++ default:
++ fprintf (stream, "%s, %s, %s, %s",
++ reg_names[true_regnum (x) + 3],
++ reg_names[true_regnum (x) + 2],
++ reg_names[true_regnum (x) + 1],
++ reg_names[true_regnum (x)]);
++ break;
++ }
++ }
++ else
++ {
++ fputs (reg_names[true_regnum (x)], stream);
++ }
++ break;
++ case CODE_LABEL:
++ case LABEL_REF:
++ case SYMBOL_REF:
++ output_addr_const (stream, x);
++ break;
++ case MEM:
++ switch (GET_CODE (XEXP (x, 0)))
++ {
++ case LABEL_REF:
++ case SYMBOL_REF:
++ output_addr_const (stream, XEXP (x, 0));
++ break;
++ case MEM:
++ switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
++ {
++ case SYMBOL_REF:
++ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
++ break;
++ default:
++ error = 1;
++ break;
++ }
++ break;
++ case REG:
++ avr32_print_operand (stream, XEXP (x, 0), 0);
++ if (code != 'p')
++ fputs ("[0]", stream);
++ break;
++ case PRE_DEC:
++ fputs ("--", stream);
++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
++ break;
++ case POST_INC:
++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
++ fputs ("++", stream);
++ break;
++ case PLUS:
++ {
++ rtx op0 = XEXP (XEXP (x, 0), 0);
++ rtx op1 = XEXP (XEXP (x, 0), 1);
++ rtx base = NULL_RTX, offset = NULL_RTX;
++
++ if (avr32_address_register_rtx_p (op0, 1))
++ {
++ base = op0;
++ offset = op1;
++ }
++ else if (avr32_address_register_rtx_p (op1, 1))
++ {
++ /* Operands are switched. */
++ base = op1;
++ offset = op0;
++ }
++
++ gcc_assert (base && offset
++ && avr32_address_register_rtx_p (base, 1)
++ && avr32_legitimate_index_p (GET_MODE (x), offset,
++ 1));
++
++ avr32_print_operand (stream, base, 0);
++ fputs ("[", stream);
++ avr32_print_operand (stream, offset, 0);
++ fputs ("]", stream);
++ break;
++ }
++ case CONST:
++ output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
++ fprintf (stream, " + %ld",
++ INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
++ break;
++ case CONST_INT:
++ avr32_print_operand (stream, XEXP (x, 0), 0);
++ break;
++ default:
++ error = 1;
++ }
++ break;
++ case MULT:
++ {
++ int value = INTVAL (XEXP (x, 1));
++
++ /* Convert immediate in multiplication into a shift immediate */
++ switch (value)
++ {
++ case 2:
++ value = 1;
++ break;
++ case 4:
++ value = 2;
++ break;
++ case 8:
++ value = 3;
++ break;
++ default:
++ value = 0;
++ }
++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
++ value);
++ break;
++ }
++ case ASHIFT:
++ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
++ (int) INTVAL (XEXP (x, 1)));
++ else if (REG_P (XEXP (x, 1)))
++ fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
++ reg_names[true_regnum (XEXP (x, 1))]);
++ else
++ {
++ error = 1;
++ }
++ break;
++ case LSHIFTRT:
++ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
++ fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
++ (int) INTVAL (XEXP (x, 1)));
++ else if (REG_P (XEXP (x, 1)))
++ fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
++ reg_names[true_regnum (XEXP (x, 1))]);
++ else
++ {
++ error = 1;
++ }
++ fprintf (stream, ">>");
++ break;
++ case PARALLEL:
++ {
++ /* Load store multiple */
++ int i;
++ int count = XVECLEN (x, 0);
++ int reglist16 = 0;
++ char reglist16_string[100];
++
++ for (i = 0; i < count; ++i)
++ {
++ rtx vec_elm = XVECEXP (x, 0, i);
++ if (GET_MODE (vec_elm) != SET)
++ {
++ debug_rtx (vec_elm);
++ internal_error ("Unknown element in parallel expression!");
++ }
++ if (GET_MODE (XEXP (vec_elm, 0)) == REG)
++ {
++ /* Load multiple */
++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
++ }
++ else
++ {
++ /* Store multiple */
++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
++ }
++ }
++
++ avr32_make_reglist16 (reglist16, reglist16_string);
++ fputs (reglist16_string, stream);
++
++ break;
++ }
++
++ case PLUS:
++ {
++ rtx op0 = XEXP (x, 0);
++ rtx op1 = XEXP (x, 1);
++ rtx base = NULL_RTX, offset = NULL_RTX;
++
++ if (avr32_address_register_rtx_p (op0, 1))
++ {
++ base = op0;
++ offset = op1;
++ }
++ else if (avr32_address_register_rtx_p (op1, 1))
++ {
++ /* Operands are switched. */
++ base = op1;
++ offset = op0;
++ }
++
++ gcc_assert (base && offset
++ && avr32_address_register_rtx_p (base, 1)
++ && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
++
++ avr32_print_operand (stream, base, 0);
++ fputs ("[", stream);
++ avr32_print_operand (stream, offset, 0);
++ fputs ("]", stream);
++ break;
++ }
++
++ default:
++ error = 1;
++ }
++
++ if (error)
++ {
++ debug_rtx (x);
++ internal_error ("Illegal expression for avr32_print_operand");
++ }
++}
++
++rtx
++avr32_get_note_reg_equiv (rtx insn)
++{
++ rtx note;
++
++ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
++
++ if (note != NULL_RTX)
++ return XEXP (note, 0);
++ else
++ return NULL_RTX;
++}
++
++
++/*
++ Outputs to stdio stream stream the assembler syntax for an instruction
++ operand that is a memory reference whose address is x. x is an RTL
++ expression.
++
++ ToDo: fixme.
++*/
++void
++avr32_print_operand_address (FILE * stream, rtx x)
++{
++ fprintf (stream, "(%d) /* address */", REGNO (x));
++}
++
++
++/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
++bool
++avr32_got_mentioned_p (rtx addr)
++{
++ if (GET_CODE (addr) == MEM)
++ addr = XEXP (addr, 0);
++ while (GET_CODE (addr) == CONST)
++ addr = XEXP (addr, 0);
++ if (GET_CODE (addr) == SYMBOL_REF)
++ {
++ return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
++ }
++ if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
++ {
++ bool l1, l2;
++
++ l1 = avr32_got_mentioned_p (XEXP (addr, 0));
++ l2 = avr32_got_mentioned_p (XEXP (addr, 1));
++ return l1 || l2;
++ }
++ return false;
++}
++
++
++/* Find the symbol in an address expression. */
++rtx
++avr32_find_symbol (rtx addr)
++{
++ if (GET_CODE (addr) == MEM)
++ addr = XEXP (addr, 0);
++
++ while (GET_CODE (addr) == CONST)
++ addr = XEXP (addr, 0);
++
++ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
++ return addr;
++ if (GET_CODE (addr) == PLUS)
++ {
++ rtx l1, l2;
++
++ l1 = avr32_find_symbol (XEXP (addr, 0));
++ l2 = avr32_find_symbol (XEXP (addr, 1));
++ if (l1 != NULL_RTX && l2 == NULL_RTX)
++ return l1;
++ else if (l1 == NULL_RTX && l2 != NULL_RTX)
++ return l2;
++ }
++
++ return NULL_RTX;
++}
++
++
++/* Routines for manipulation of the constant pool. */
++
++/* AVR32 instructions cannot load a large constant directly into a
++ register; they have to come from a pc relative load. The constant
++ must therefore be placed in the addressable range of the pc
++ relative load. Depending on the precise pc relative load
++ instruction the range is somewhere between 256 bytes and 4k. This
++ means that we often have to dump a constant inside a function, and
++ generate code to branch around it.
++
++ It is important to minimize this, since the branches will slow
++ things down and make the code larger.
++
++ Normally we can hide the table after an existing unconditional
++ branch so that there is no interruption of the flow, but in the
++ worst case the code looks like this:
++
++ lddpc rn, L1
++ ...
++ rjmp L2
++ align
++ L1: .long value
++ L2:
++ ...
++
++ lddpc rn, L3
++ ...
++ rjmp L4
++ align
++ L3: .long value
++ L4:
++ ...
++
++ We fix this by performing a scan after scheduling, which notices
++ which instructions need to have their operands fetched from the
++ constant table and builds the table.
++
++ The algorithm starts by building a table of all the constants that
++ need fixing up and all the natural barriers in the function (places
++ where a constant table can be dropped without breaking the flow).
++ For each fixup we note how far the pc-relative replacement will be
++ able to reach and the offset of the instruction into the function.
++
++ Having built the table we then group the fixes together to form
++ tables that are as large as possible (subject to addressing
++ constraints) and emit each table of constants after the last
++ barrier that is within range of all the instructions in the group.
++ If a group does not contain a barrier, then we forcibly create one
++ by inserting a jump instruction into the flow. Once the table has
++ been inserted, the insns are then modified to reference the
++ relevant entry in the pool.
++
++ Possible enhancements to the algorithm (not implemented) are:
++
++ 1) For some processors and object formats, there may be benefit in
++ aligning the pools to the start of cache lines; this alignment
++ would need to be taken into account when calculating addressability
++ of a pool. */
++
++/* These typedefs are located at the start of this file, so that
++ they can be used in the prototypes there. This comment is to
++ remind readers of that fact so that the following structures
++ can be understood more easily.
++
++ typedef struct minipool_node Mnode;
++ typedef struct minipool_fixup Mfix; */
++
++struct minipool_node
++{
++ /* Doubly linked chain of entries. */
++ Mnode *next;
++ Mnode *prev;
++ /* The maximum offset into the code that this entry can be placed. While
++ pushing fixes for forward references, all entries are sorted in order of
++ increasing max_address. */
++ HOST_WIDE_INT max_address;
++ /* Similarly for an entry inserted for a backwards ref. */
++ HOST_WIDE_INT min_address;
++ /* The number of fixes referencing this entry. This can become zero if we
++ "unpush" an entry. In this case we ignore the entry when we come to
++ emit the code. */
++ int refcount;
++ /* The offset from the start of the minipool. */
++ HOST_WIDE_INT offset;
++ /* The value in table. */
++ rtx value;
++ /* The mode of value. */
++ enum machine_mode mode;
++ /* The size of the value. */
++ int fix_size;
++};
++
++
++struct minipool_fixup
++{
++ Mfix *next;
++ rtx insn;
++ HOST_WIDE_INT address;
++ rtx *loc;
++ enum machine_mode mode;
++ int fix_size;
++ rtx value;
++ Mnode *minipool;
++ HOST_WIDE_INT forwards;
++ HOST_WIDE_INT backwards;
++};
++
++
++/* Fixes less than a word need padding out to a word boundary. */
++#define MINIPOOL_FIX_SIZE(mode, value) \
++ (IS_FORCE_MINIPOOL(value) ? 0 : \
++ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
++
++#define IS_FORCE_MINIPOOL(x) \
++ (GET_CODE(x) == UNSPEC && \
++ XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
++
++static Mnode *minipool_vector_head;
++static Mnode *minipool_vector_tail;
++
++/* The linked list of all minipool fixes required for this function. */
++Mfix *minipool_fix_head;
++Mfix *minipool_fix_tail;
++/* The fix entry for the current minipool, once it has been placed. */
++Mfix *minipool_barrier;
++
++
++/* Determines if INSN is the start of a jump table. Returns the end
++ of the TABLE or NULL_RTX. */
++static rtx
++is_jump_table (rtx insn)
++{
++ rtx table;
++
++ if (GET_CODE (insn) == JUMP_INSN
++ && JUMP_LABEL (insn) != NULL
++ && ((table = next_real_insn (JUMP_LABEL (insn)))
++ == next_real_insn (insn))
++ && table != NULL
++ && GET_CODE (table) == JUMP_INSN
++ && (GET_CODE (PATTERN (table)) == ADDR_VEC
++ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
++ return table;
++
++ return NULL_RTX;
++}
++
++
++static HOST_WIDE_INT
++get_jump_table_size (rtx insn)
++{
++ /* ADDR_VECs only take room if read-only data does into the text section. */
++ if (JUMP_TABLES_IN_TEXT_SECTION
++#if !defined(READONLY_DATA_SECTION_ASM_OP)
++ || 1
++#endif
++ )
++ {
++ rtx body = PATTERN (insn);
++ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
++
++ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
++ }
++
++ return 0;
++}
++
++
++/* Move a minipool fix MP from its current location to before MAX_MP.
++ If MAX_MP is NULL, then MP doesn't need moving, but the addressing
++ constraints may need updating. */
++static Mnode *
++move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
++ HOST_WIDE_INT max_address)
++{
++ /* This should never be true and the code below assumes these are
++ different. */
++ if (mp == max_mp)
++ abort ();
++
++ if (max_mp == NULL)
++ {
++ if (max_address < mp->max_address)
++ mp->max_address = max_address;
++ }
++ else
++ {
++ if (max_address > max_mp->max_address - mp->fix_size)
++ mp->max_address = max_mp->max_address - mp->fix_size;
++ else
++ mp->max_address = max_address;
++
++ /* Unlink MP from its current position. Since max_mp is non-null,
++ mp->prev must be non-null. */
++ mp->prev->next = mp->next;
++ if (mp->next != NULL)
++ mp->next->prev = mp->prev;
++ else
++ minipool_vector_tail = mp->prev;
++
++ /* Re-insert it before MAX_MP. */
++ mp->next = max_mp;
++ mp->prev = max_mp->prev;
++ max_mp->prev = mp;
++
++ if (mp->prev != NULL)
++ mp->prev->next = mp;
++ else
++ minipool_vector_head = mp;
++ }
++
++ /* Save the new entry. */
++ max_mp = mp;
++
++ /* Scan over the preceding entries and adjust their addresses as required.
++ */
++ while (mp->prev != NULL
++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
++ {
++ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
++ mp = mp->prev;
++ }
++
++ return max_mp;
++}
++
++
++/* Add a constant to the minipool for a forward reference. Returns the
++ node added or NULL if the constant will not fit in this pool. */
++static Mnode *
++add_minipool_forward_ref (Mfix * fix)
++{
++ /* If set, max_mp is the first pool_entry that has a lower constraint than
++ the one we are trying to add. */
++ Mnode *max_mp = NULL;
++ HOST_WIDE_INT max_address = fix->address + fix->forwards;
++ Mnode *mp;
++
++ /* If this fix's address is greater than the address of the first entry,
++ then we can't put the fix in this pool. We subtract the size of the
++ current fix to ensure that if the table is fully packed we still have
++ enough room to insert this value by suffling the other fixes forwards. */
++ if (minipool_vector_head &&
++ fix->address >= minipool_vector_head->max_address - fix->fix_size)
++ return NULL;
++
++ /* Scan the pool to see if a constant with the same value has already been
++ added. While we are doing this, also note the location where we must
++ insert the constant if it doesn't already exist. */
++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
++ {
++ if (GET_CODE (fix->value) == GET_CODE (mp->value)
++ && fix->mode == mp->mode
++ && (GET_CODE (fix->value) != CODE_LABEL
++ || (CODE_LABEL_NUMBER (fix->value)
++ == CODE_LABEL_NUMBER (mp->value)))
++ && rtx_equal_p (fix->value, mp->value))
++ {
++ /* More than one fix references this entry. */
++ mp->refcount++;
++ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
++ }
++
++ /* Note the insertion point if necessary. */
++ if (max_mp == NULL && mp->max_address > max_address)
++ max_mp = mp;
++
++ }
++
++ /* The value is not currently in the minipool, so we need to create a new
++ entry for it. If MAX_MP is NULL, the entry will be put on the end of
++ the list since the placement is less constrained than any existing
++ entry. Otherwise, we insert the new fix before MAX_MP and, if
++ necessary, adjust the constraints on the other entries. */
++ mp = xmalloc (sizeof (*mp));
++ mp->fix_size = fix->fix_size;
++ mp->mode = fix->mode;
++ mp->value = fix->value;
++ mp->refcount = 1;
++ /* Not yet required for a backwards ref. */
++ mp->min_address = -65536;
++
++ if (max_mp == NULL)
++ {
++ mp->max_address = max_address;
++ mp->next = NULL;
++ mp->prev = minipool_vector_tail;
++
++ if (mp->prev == NULL)
++ {
++ minipool_vector_head = mp;
++ minipool_vector_label = gen_label_rtx ();
++ }
++ else
++ mp->prev->next = mp;
++
++ minipool_vector_tail = mp;
++ }
++ else
++ {
++ if (max_address > max_mp->max_address - mp->fix_size)
++ mp->max_address = max_mp->max_address - mp->fix_size;
++ else
++ mp->max_address = max_address;
++
++ mp->next = max_mp;
++ mp->prev = max_mp->prev;
++ max_mp->prev = mp;
++ if (mp->prev != NULL)
++ mp->prev->next = mp;
++ else
++ minipool_vector_head = mp;
++ }
++
++ /* Save the new entry. */
++ max_mp = mp;
++
++ /* Scan over the preceding entries and adjust their addresses as required.
++ */
++ while (mp->prev != NULL
++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
++ {
++ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
++ mp = mp->prev;
++ }
++
++ return max_mp;
++}
++
++
++static Mnode *
++move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
++ HOST_WIDE_INT min_address)
++{
++ HOST_WIDE_INT offset;
++
++ /* This should never be true, and the code below assumes these are
++ different. */
++ if (mp == min_mp)
++ abort ();
++
++ if (min_mp == NULL)
++ {
++ if (min_address > mp->min_address)
++ mp->min_address = min_address;
++ }
++ else
++ {
++ /* We will adjust this below if it is too loose. */
++ mp->min_address = min_address;
++
++ /* Unlink MP from its current position. Since min_mp is non-null,
++ mp->next must be non-null. */
++ mp->next->prev = mp->prev;
++ if (mp->prev != NULL)
++ mp->prev->next = mp->next;
++ else
++ minipool_vector_head = mp->next;
++
++ /* Reinsert it after MIN_MP. */
++ mp->prev = min_mp;
++ mp->next = min_mp->next;
++ min_mp->next = mp;
++ if (mp->next != NULL)
++ mp->next->prev = mp;
++ else
++ minipool_vector_tail = mp;
++ }
++
++ min_mp = mp;
++
++ offset = 0;
++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
++ {
++ mp->offset = offset;
++ if (mp->refcount > 0)
++ offset += mp->fix_size;
++
++ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
++ mp->next->min_address = mp->min_address + mp->fix_size;
++ }
++
++ return min_mp;
++}
++
++
++/* Add a constant to the minipool for a backward reference. Returns the
++ node added or NULL if the constant will not fit in this pool.
++
++ Note that the code for insertion for a backwards reference can be
++ somewhat confusing because the calculated offsets for each fix do
++ not take into account the size of the pool (which is still under
++ construction. */
++static Mnode *
++add_minipool_backward_ref (Mfix * fix)
++{
++ /* If set, min_mp is the last pool_entry that has a lower constraint than
++ the one we are trying to add. */
++ Mnode *min_mp = NULL;
++ /* This can be negative, since it is only a constraint. */
++ HOST_WIDE_INT min_address = fix->address - fix->backwards;
++ Mnode *mp;
++
++ /* If we can't reach the current pool from this insn, or if we can't insert
++ this entry at the end of the pool without pushing other fixes out of
++ range, then we don't try. This ensures that we can't fail later on. */
++ if (min_address >= minipool_barrier->address
++ || (minipool_vector_tail->min_address + fix->fix_size
++ >= minipool_barrier->address))
++ return NULL;
++
++ /* Scan the pool to see if a constant with the same value has already been
++ added. While we are doing this, also note the location where we must
++ insert the constant if it doesn't already exist. */
++ for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
++ {
++ if (GET_CODE (fix->value) == GET_CODE (mp->value)
++ && fix->mode == mp->mode
++ && (GET_CODE (fix->value) != CODE_LABEL
++ || (CODE_LABEL_NUMBER (fix->value)
++ == CODE_LABEL_NUMBER (mp->value)))
++ && rtx_equal_p (fix->value, mp->value)
++ /* Check that there is enough slack to move this entry to the end
++ of the table (this is conservative). */
++ && (mp->max_address
++ > (minipool_barrier->address
++ + minipool_vector_tail->offset
++ + minipool_vector_tail->fix_size)))
++ {
++ mp->refcount++;
++ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
++ }
++
++ if (min_mp != NULL)
++ mp->min_address += fix->fix_size;
++ else
++ {
++ /* Note the insertion point if necessary. */
++ if (mp->min_address < min_address)
++ {
++ min_mp = mp;
++ }
++ else if (mp->max_address
++ < minipool_barrier->address + mp->offset + fix->fix_size)
++ {
++ /* Inserting before this entry would push the fix beyond its
++ maximum address (which can happen if we have re-located a
++ forwards fix); force the new fix to come after it. */
++ min_mp = mp;
++ min_address = mp->min_address + fix->fix_size;
++ }
++ }
++ }
++
++ /* We need to create a new entry. */
++ mp = xmalloc (sizeof (*mp));
++ mp->fix_size = fix->fix_size;
++ mp->mode = fix->mode;
++ mp->value = fix->value;
++ mp->refcount = 1;
++ mp->max_address = minipool_barrier->address + 65536;
++
++ mp->min_address = min_address;
++
++ if (min_mp == NULL)
++ {
++ mp->prev = NULL;
++ mp->next = minipool_vector_head;
++
++ if (mp->next == NULL)
++ {
++ minipool_vector_tail = mp;
++ minipool_vector_label = gen_label_rtx ();
++ }
++ else
++ mp->next->prev = mp;
++
++ minipool_vector_head = mp;
++ }
++ else
++ {
++ mp->next = min_mp->next;
++ mp->prev = min_mp;
++ min_mp->next = mp;
++
++ if (mp->next != NULL)
++ mp->next->prev = mp;
++ else
++ minipool_vector_tail = mp;
++ }
++
++ /* Save the new entry. */
++ min_mp = mp;
++
++ if (mp->prev)
++ mp = mp->prev;
++ else
++ mp->offset = 0;
++
++ /* Scan over the following entries and adjust their offsets. */
++ while (mp->next != NULL)
++ {
++ if (mp->next->min_address < mp->min_address + mp->fix_size)
++ mp->next->min_address = mp->min_address + mp->fix_size;
++
++ if (mp->refcount)
++ mp->next->offset = mp->offset + mp->fix_size;
++ else
++ mp->next->offset = mp->offset;
++
++ mp = mp->next;
++ }
++
++ return min_mp;
++}
++
++
++static void
++assign_minipool_offsets (Mfix * barrier)
++{
++ HOST_WIDE_INT offset = 0;
++ Mnode *mp;
++
++ minipool_barrier = barrier;
++
++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
++ {
++ mp->offset = offset;
++
++ if (mp->refcount > 0)
++ offset += mp->fix_size;
++ }
++}
++
++
++/* Print a symbolic form of X to the debug file, F. */
++static void
++avr32_print_value (FILE * f, rtx x)
++{
++ switch (GET_CODE (x))
++ {
++ case CONST_INT:
++ fprintf (f, "0x%x", (int) INTVAL (x));
++ return;
++
++ case CONST_DOUBLE:
++ fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
++ return;
++
++ case CONST_VECTOR:
++ {
++ int i;
++
++ fprintf (f, "<");
++ for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
++ {
++ fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
++ if (i < (CONST_VECTOR_NUNITS (x) - 1))
++ fputc (',', f);
++ }
++ fprintf (f, ">");
++ }
++ return;
++
++ case CONST_STRING:
++ fprintf (f, "\"%s\"", XSTR (x, 0));
++ return;
++
++ case SYMBOL_REF:
++ fprintf (f, "`%s'", XSTR (x, 0));
++ return;
++
++ case LABEL_REF:
++ fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
++ return;
++
++ case CONST:
++ avr32_print_value (f, XEXP (x, 0));
++ return;
++
++ case PLUS:
++ avr32_print_value (f, XEXP (x, 0));
++ fprintf (f, "+");
++ avr32_print_value (f, XEXP (x, 1));
++ return;
++
++ case PC:
++ fprintf (f, "pc");
++ return;
++
++ default:
++ fprintf (f, "????");
++ return;
++ }
++}
++
++
++int
++is_minipool_label (rtx label)
++{
++ minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
++
++ if (GET_CODE (label) != CODE_LABEL)
++ return FALSE;
++
++ while (cur_mp_label)
++ {
++ if (CODE_LABEL_NUMBER (label)
++ == CODE_LABEL_NUMBER (cur_mp_label->label))
++ return TRUE;
++ cur_mp_label = cur_mp_label->next;
++ }
++ return FALSE;
++}
++
++
++static void
++new_minipool_label (rtx label)
++{
++ if (!cfun->machine->minipool_label_head)
++ {
++ cfun->machine->minipool_label_head =
++ ggc_alloc (sizeof (minipool_labels));
++ cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
++ cfun->machine->minipool_label_head->label = label;
++ cfun->machine->minipool_label_head->next = 0;
++ cfun->machine->minipool_label_head->prev = 0;
++ }
++ else
++ {
++ cfun->machine->minipool_label_tail->next =
++ ggc_alloc (sizeof (minipool_labels));
++ cfun->machine->minipool_label_tail->next->label = label;
++ cfun->machine->minipool_label_tail->next->next = 0;
++ cfun->machine->minipool_label_tail->next->prev =
++ cfun->machine->minipool_label_tail;
++ cfun->machine->minipool_label_tail =
++ cfun->machine->minipool_label_tail->next;
++ }
++}
++
++
++/* Output the literal table */
++static void
++dump_minipool (rtx scan)
++{
++ Mnode *mp;
++ Mnode *nmp;
++
++ if (dump_file)
++ fprintf (dump_file,
++ ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
++ INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
++
++ scan = emit_insn_after (gen_consttable_start (), scan);
++ scan = emit_insn_after (gen_align_4 (), scan);
++ scan = emit_label_after (minipool_vector_label, scan);
++ new_minipool_label (minipool_vector_label);
++
++ for (mp = minipool_vector_head; mp != NULL; mp = nmp)
++ {
++ if (mp->refcount > 0)
++ {
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Offset %u, min %ld, max %ld ",
++ (unsigned) mp->offset, (unsigned long) mp->min_address,
++ (unsigned long) mp->max_address);
++ avr32_print_value (dump_file, mp->value);
++ fputc ('\n', dump_file);
++ }
++
++ switch (mp->fix_size)
++ {
++#ifdef HAVE_consttable_4
++ case 4:
++ scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
++ break;
++
++#endif
++#ifdef HAVE_consttable_8
++ case 8:
++ scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
++ break;
++
++#endif
++#ifdef HAVE_consttable_16
++ case 16:
++ scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
++ break;
++
++#endif
++ case 0:
++ /* This can happen for force-minipool entries which just are
++ there to force the minipool to be generate. */
++ break;
++ default:
++ abort ();
++ break;
++ }
++ }
++
++ nmp = mp->next;
++ free (mp);
++ }
++
++ minipool_vector_head = minipool_vector_tail = NULL;
++ scan = emit_insn_after (gen_consttable_end (), scan);
++ scan = emit_barrier_after (scan);
++}
++
++
++/* Return the cost of forcibly inserting a barrier after INSN. */
++static int
++avr32_barrier_cost (rtx insn)
++{
++ /* Basing the location of the pool on the loop depth is preferable, but at
++ the moment, the basic block information seems to be corrupt by this
++ stage of the compilation. */
++ int base_cost = 50;
++ rtx next = next_nonnote_insn (insn);
++
++ if (next != NULL && GET_CODE (next) == CODE_LABEL)
++ base_cost -= 20;
++
++ switch (GET_CODE (insn))
++ {
++ case CODE_LABEL:
++ /* It will always be better to place the table before the label, rather
++ than after it. */
++ return 50;
++
++ case INSN:
++ case CALL_INSN:
++ return base_cost;
++
++ case JUMP_INSN:
++ return base_cost - 10;
++
++ default:
++ return base_cost + 10;
++ }
++}
++
++
++/* Find the best place in the insn stream in the range
++ (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
++ Create the barrier by inserting a jump and add a new fix entry for
++ it. */
++static Mfix *
++create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
++{
++ HOST_WIDE_INT count = 0;
++ rtx barrier;
++ rtx from = fix->insn;
++ rtx selected = from;
++ int selected_cost;
++ HOST_WIDE_INT selected_address;
++ Mfix *new_fix;
++ HOST_WIDE_INT max_count = max_address - fix->address;
++ rtx label = gen_label_rtx ();
++
++ selected_cost = avr32_barrier_cost (from);
++ selected_address = fix->address;
++
++ while (from && count < max_count)
++ {
++ rtx tmp;
++ int new_cost;
++
++ /* This code shouldn't have been called if there was a natural barrier
++ within range. */
++ if (GET_CODE (from) == BARRIER)
++ abort ();
++
++ /* Count the length of this insn. */
++ count += get_attr_length (from);
++
++ /* If there is a jump table, add its length. */
++ tmp = is_jump_table (from);
++ if (tmp != NULL)
++ {
++ count += get_jump_table_size (tmp);
++
++ /* Jump tables aren't in a basic block, so base the cost on the
++ dispatch insn. If we select this location, we will still put
++ the pool after the table. */
++ new_cost = avr32_barrier_cost (from);
++
++ if (count < max_count && new_cost <= selected_cost)
++ {
++ selected = tmp;
++ selected_cost = new_cost;
++ selected_address = fix->address + count;
++ }
++
++ /* Continue after the dispatch table. */
++ from = NEXT_INSN (tmp);
++ continue;
++ }
++
++ new_cost = avr32_barrier_cost (from);
++
++ if (count < max_count && new_cost <= selected_cost)
++ {
++ selected = from;
++ selected_cost = new_cost;
++ selected_address = fix->address + count;
++ }
++
++ from = NEXT_INSN (from);
++ }
++
++ /* Create a new JUMP_INSN that branches around a barrier. */
++ from = emit_jump_insn_after (gen_jump (label), selected);
++ JUMP_LABEL (from) = label;
++ barrier = emit_barrier_after (from);
++ emit_label_after (label, barrier);
++
++ /* Create a minipool barrier entry for the new barrier. */
++ new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
++ new_fix->insn = barrier;
++ new_fix->address = selected_address;
++ new_fix->next = fix->next;
++ fix->next = new_fix;
++
++ return new_fix;
++}
++
++
++/* Record that there is a natural barrier in the insn stream at
++ ADDRESS. */
++static void
++push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
++{
++ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
++
++ fix->insn = insn;
++ fix->address = address;
++
++ fix->next = NULL;
++ if (minipool_fix_head != NULL)
++ minipool_fix_tail->next = fix;
++ else
++ minipool_fix_head = fix;
++
++ minipool_fix_tail = fix;
++}
++
++
++/* Record INSN, which will need fixing up to load a value from the
++ minipool. ADDRESS is the offset of the insn since the start of the
++ function; LOC is a pointer to the part of the insn which requires
++ fixing; VALUE is the constant that must be loaded, which is of type
++ MODE. */
++static void
++push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
++ enum machine_mode mode, rtx value)
++{
++ Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
++ rtx body = PATTERN (insn);
++
++ fix->insn = insn;
++ fix->address = address;
++ fix->loc = loc;
++ fix->mode = mode;
++ fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
++ fix->value = value;
++
++ if (GET_CODE (body) == PARALLEL)
++ {
++ /* Mcall : Ks16 << 2 */
++ fix->forwards = ((1 << 15) - 1) << 2;
++ fix->backwards = (1 << 15) << 2;
++ }
++ else if (GET_CODE (body) == SET
++ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
++ {
++ if (optimize_size)
++ {
++ /* Lddpc : Ku7 << 2 */
++ fix->forwards = ((1 << 7) - 1) << 2;
++ fix->backwards = 0;
++ }
++ else
++ {
++ /* Ld.w : Ks16 */
++ fix->forwards = ((1 << 15) - 4);
++ fix->backwards = (1 << 15);
++ }
++ }
++ else if (GET_CODE (body) == SET
++ && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
++ {
++ /* Ld.d : Ks16 */
++ fix->forwards = ((1 << 15) - 4);
++ fix->backwards = (1 << 15);
++ }
++ else if (GET_CODE (body) == UNSPEC_VOLATILE
++ && XINT (body, 1) == VUNSPEC_MVRC)
++ {
++ /* Coprocessor load */
++ /* Ldc : Ku8 << 2 */
++ fix->forwards = ((1 << 8) - 1) << 2;
++ fix->backwards = 0;
++ }
++ else
++ {
++ /* Assume worst case which is lddpc insn. */
++ fix->forwards = ((1 << 7) - 1) << 2;
++ fix->backwards = 0;
++ }
++
++ fix->minipool = NULL;
++
++ /* If an insn doesn't have a range defined for it, then it isn't expecting
++ to be reworked by this code. Better to abort now than to generate duff
++ assembly code. */
++ if (fix->forwards == 0 && fix->backwards == 0)
++ abort ();
++
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
++ GET_MODE_NAME (mode),
++ INSN_UID (insn), (unsigned long) address,
++ -1 * (long) fix->backwards, (long) fix->forwards);
++ avr32_print_value (dump_file, fix->value);
++ fprintf (dump_file, "\n");
++ }
++
++ /* Add it to the chain of fixes. */
++ fix->next = NULL;
++
++ if (minipool_fix_head != NULL)
++ minipool_fix_tail->next = fix;
++ else
++ minipool_fix_head = fix;
++
++ minipool_fix_tail = fix;
++}
++
++
++/* Scan INSN and note any of its operands that need fixing.
++ If DO_PUSHES is false we do not actually push any of the fixups
++ needed. The function returns TRUE is any fixups were needed/pushed.
++ This is used by avr32_memory_load_p() which needs to know about loads
++ of constants that will be converted into minipool loads. */
++static bool
++note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
++{
++ bool result = false;
++ int opno;
++
++ extract_insn (insn);
++
++ if (!constrain_operands (1))
++ fatal_insn_not_found (insn);
++
++ if (recog_data.n_alternatives == 0)
++ return false;
++
++ /* Fill in recog_op_alt with information about the constraints of this
++ insn. */
++ preprocess_constraints ();
++
++ for (opno = 0; opno < recog_data.n_operands; opno++)
++ {
++ rtx op;
++
++ /* Things we need to fix can only occur in inputs. */
++ if (recog_data.operand_type[opno] != OP_IN)
++ continue;
++
++ op = recog_data.operand[opno];
++
++ if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
++ {
++ if (do_pushes)
++ {
++ rtx cop = avoid_constant_pool_reference (op);
++
++ /* Casting the address of something to a mode narrower than a
++ word can cause avoid_constant_pool_reference() to return the
++ pool reference itself. That's no good to us here. Lets
++ just hope that we can use the constant pool value directly.
++ */
++ if (op == cop)
++ cop = get_pool_constant (XEXP (op, 0));
++
++ push_minipool_fix (insn, address,
++ recog_data.operand_loc[opno],
++ recog_data.operand_mode[opno], cop);
++ }
++
++ result = true;
++ }
++ else if (TARGET_HAS_ASM_ADDR_PSEUDOS
++ && avr32_address_operand (op, GET_MODE (op)))
++ {
++ /* Handle pseudo instructions using a direct address. These pseudo
++ instructions might need entries in the constant pool and we must
++ therefor create a constant pool for them, in case the
++ assembler/linker needs to insert entries. */
++ if (do_pushes)
++ {
++ /* Push a dummy constant pool entry so that the .cpool
++ directive should be inserted on the appropriate place in the
++ code even if there are no real constant pool entries. This
++ is used by the assembler and linker to know where to put
++ generated constant pool entries. */
++ push_minipool_fix (insn, address,
++ recog_data.operand_loc[opno],
++ recog_data.operand_mode[opno],
++ gen_rtx_UNSPEC (VOIDmode,
++ gen_rtvec (1, const0_rtx),
++ UNSPEC_FORCE_MINIPOOL));
++ result = true;
++ }
++ }
++ }
++ return result;
++}
++
++
++static int
++avr32_insn_is_cast (rtx insn)
++{
++
++ if (NONJUMP_INSN_P (insn)
++ && GET_CODE (PATTERN (insn)) == SET
++ && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
++ || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
++ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
++ && REG_P (SET_DEST (PATTERN (insn))))
++ return true;
++ return false;
++}
++
++
++/* Replace all occurances of reg FROM with reg TO in X. */
++rtx
++avr32_replace_reg (rtx x, rtx from, rtx to)
++{
++ int i, j;
++ const char *fmt;
++
++ gcc_assert ( REG_P (from) && REG_P (to) );
++
++ /* Allow this function to make replacements in EXPR_LISTs. */
++ if (x == 0)
++ return 0;
++
++ if (rtx_equal_p (x, from))
++ return to;
++
++ if (GET_CODE (x) == SUBREG)
++ {
++ rtx new = avr32_replace_reg (SUBREG_REG (x), from, to);
++
++ if (GET_CODE (new) == CONST_INT)
++ {
++ x = simplify_subreg (GET_MODE (x), new,
++ GET_MODE (SUBREG_REG (x)),
++ SUBREG_BYTE (x));
++ gcc_assert (x);
++ }
++ else
++ SUBREG_REG (x) = new;
++
++ return x;
++ }
++ else if (GET_CODE (x) == ZERO_EXTEND)
++ {
++ rtx new = avr32_replace_reg (XEXP (x, 0), from, to);
++
++ if (GET_CODE (new) == CONST_INT)
++ {
++ x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
++ new, GET_MODE (XEXP (x, 0)));
++ gcc_assert (x);
++ }
++ else
++ XEXP (x, 0) = new;
++
++ return x;
++ }
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'e')
++ XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to);
++ else if (fmt[i] == 'E')
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to);
++ }
++
++ return x;
++}
++
++
++/* FIXME: The level of nesting in this function is way too deep. It needs to be
++ torn apart. */
++static void
++avr32_reorg_optimization (void)
++{
++ rtx first = get_first_nonnote_insn ();
++ rtx insn;
++
++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ {
++
++ /* Scan through all insns looking for cast operations. */
++ if (dump_file)
++ {
++ fprintf (dump_file, ";; Deleting redundant cast operations:\n");
++ }
++ for (insn = first; insn; insn = NEXT_INSN (insn))
++ {
++ rtx reg, src_reg, scan;
++ enum machine_mode mode;
++ int unused_cast;
++ rtx label_ref;
++
++ if (avr32_insn_is_cast (insn)
++ && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
++ || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
++ {
++ mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
++ reg = SET_DEST (PATTERN (insn));
++ src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
++ }
++ else
++ {
++ continue;
++ }
++
++ unused_cast = false;
++ label_ref = NULL_RTX;
++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
++ {
++ /* Check if we have reached the destination of a simple
++ conditional jump which we have already scanned past. If so,
++ we can safely continue scanning. */
++ if (LABEL_P (scan) && label_ref != NULL_RTX)
++ {
++ if (CODE_LABEL_NUMBER (scan) ==
++ CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
++ label_ref = NULL_RTX;
++ else
++ break;
++ }
++
++ if (!INSN_P (scan))
++ continue;
++
++ /* For conditional jumps we can manage to keep on scanning if
++ we meet the destination label later on before any new jump
++ insns occure. */
++ if (GET_CODE (scan) == JUMP_INSN)
++ {
++ if (any_condjump_p (scan) && label_ref == NULL_RTX)
++ label_ref = condjump_label (scan);
++ else
++ break;
++ }
++
++ /* Check if we have a call and the register is used as an argument. */
++ if (CALL_P (scan)
++ && find_reg_fusage (scan, USE, reg) )
++ break;
++
++ if (!reg_mentioned_p (reg, PATTERN (scan)))
++ continue;
++
++ /* Check if casted register is used in this insn */
++ if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
++ && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
++ GET_MODE (reg)))
++ {
++ /* If not used in the source to the set or in a memory
++ expression in the destiantion then the register is used
++ as a destination and is really dead. */
++ if (single_set (scan)
++ && GET_CODE (PATTERN (scan)) == SET
++ && REG_P (SET_DEST (PATTERN (scan)))
++ && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
++ && label_ref == NULL_RTX)
++ {
++ unused_cast = true;
++ }
++ break;
++ }
++
++ /* Check if register is dead or set in this insn */
++ if (dead_or_set_p (scan, reg))
++ {
++ unused_cast = true;
++ break;
++ }
++ }
++
++ /* Check if we have unresolved conditional jumps */
++ if (label_ref != NULL_RTX)
++ continue;
++
++ if (unused_cast)
++ {
++ if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
++ {
++ /* One operand cast, safe to delete */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; INSN %i removed, casted register %i value not used.\n",
++ INSN_UID (insn), REGNO (reg));
++ }
++ SET_INSN_DELETED (insn);
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++ }
++ else
++ {
++ /* Two operand cast, which really could be substituted with
++ a move, if the source register is dead after the cast
++ insn and then the insn which sets the source register
++ could instead directly set the destination register for
++ the cast. As long as there are no insns in between which
++ uses the register. */
++ rtx link = NULL_RTX;
++ rtx set;
++ rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
++ unused_cast = false;
++
++ if (!find_reg_note (insn, REG_DEAD, src_reg))
++ continue;
++
++ /* Search for the insn which sets the source register */
++ for (scan = PREV_INSN (insn);
++ scan && GET_CODE (scan) != CODE_LABEL;
++ scan = PREV_INSN (scan))
++ {
++ if (! INSN_P (scan))
++ continue;
++
++ set = single_set (scan);
++ // Fix for bug #11763 : the following if condition
++ // has been modified and else part is included to
++ // set the link to NULL_RTX.
++ // if (set && rtx_equal_p (src_reg, SET_DEST (set)))
++ if (set && (REGNO(src_reg) == REGNO(SET_DEST(set))))
++ {
++ if (rtx_equal_p (src_reg, SET_DEST (set)))
++ {
++ link = scan;
++ break;
++ }
++ else
++ {
++ link = NULL_RTX;
++ break;
++ }
++ }
++ }
++
++
++ /* Found no link or link is a call insn where we can not
++ change the destination register */
++ if (link == NULL_RTX || CALL_P (link))
++ continue;
++
++ /* Scan through all insn between link and insn */
++ for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
++ {
++ /* Don't try to trace forward past a CODE_LABEL if we
++ haven't seen INSN yet. Ordinarily, we will only
++ find the setting insn in LOG_LINKS if it is in the
++ same basic block. However, cross-jumping can insert
++ code labels in between the load and the call, and
++ can result in situations where a single call insn
++ may have two targets depending on where we came
++ from. */
++
++ if (GET_CODE (scan) == CODE_LABEL)
++ break;
++
++ if (!INSN_P (scan))
++ continue;
++
++ /* Don't try to trace forward past a JUMP. To optimize
++ safely, we would have to check that all the
++ instructions at the jump destination did not use REG.
++ */
++
++ if (GET_CODE (scan) == JUMP_INSN)
++ {
++ break;
++ }
++
++ if (!reg_mentioned_p (src_reg, PATTERN (scan)))
++ continue;
++
++ /* We have reached the cast insn */
++ if (scan == insn)
++ {
++ /* We can remove cast and replace the destination
++ register of the link insn with the destination
++ of the cast */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; INSN %i removed, casted value unused. "
++ "Destination of removed cast operation: register %i, folded into INSN %i.\n",
++ INSN_UID (insn), REGNO (reg),
++ INSN_UID (link));
++ }
++ /* Update link insn */
++ SET_DEST (PATTERN (link)) =
++ gen_rtx_REG (mode, REGNO (reg));
++ /* Force the instruction to be recognized again */
++ INSN_CODE (link) = -1;
++
++ /* Delete insn */
++ SET_INSN_DELETED (insn);
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++ break;
++ }
++ }
++ }
++ }
++ }
++ }
++
++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ {
++
++ /* Scan through all insns looking for shifted add operations */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Deleting redundant shifted add operations:\n");
++ }
++ for (insn = first; insn; insn = NEXT_INSN (insn))
++ {
++ rtx reg, mem_expr, scan, op0, op1;
++ int add_only_used_as_pointer;
++
++ if (INSN_P (insn)
++ && GET_CODE (PATTERN (insn)) == SET
++ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
++ && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
++ || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
++ && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
++ CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
++ && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
++ && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
++ {
++ reg = SET_DEST (PATTERN (insn));
++ mem_expr = SET_SRC (PATTERN (insn));
++ op0 = XEXP (XEXP (mem_expr, 0), 0);
++ op1 = XEXP (mem_expr, 1);
++ }
++ else
++ {
++ continue;
++ }
++
++ /* Scan forward the check if the result of the shifted add
++ operation is only used as an address in memory operations and
++ that the operands to the shifted add are not clobbered. */
++ add_only_used_as_pointer = false;
++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
++ {
++ if (!INSN_P (scan))
++ continue;
++
++ /* Don't try to trace forward past a JUMP or CALL. To optimize
++ safely, we would have to check that all the instructions at
++ the jump destination did not use REG. */
++
++ if (GET_CODE (scan) == JUMP_INSN)
++ {
++ break;
++ }
++
++ /* If used in a call insn then we cannot optimize it away */
++ if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
++ break;
++
++ /* If any of the operands of the shifted add are clobbered we
++ cannot optimize the shifted adda away */
++ if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
++ || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
++ break;
++
++ if (!reg_mentioned_p (reg, PATTERN (scan)))
++ continue;
++
++ /* If used any other place than as a pointer or as the
++ destination register we failed */
++ if (!(single_set (scan)
++ && GET_CODE (PATTERN (scan)) == SET
++ && ((MEM_P (SET_DEST (PATTERN (scan)))
++ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
++ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
++ || (MEM_P (SET_SRC (PATTERN (scan)))
++ && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
++ && REGNO (XEXP
++ (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
++ && !(GET_CODE (PATTERN (scan)) == SET
++ && REG_P (SET_DEST (PATTERN (scan)))
++ && !regno_use_in (REGNO (reg),
++ SET_SRC (PATTERN (scan)))))
++ break;
++
++ /* We cannot replace the pointer in TImode insns
++ as these has a differene addressing mode than the other
++ memory insns. */
++ if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode )
++ break;
++
++ /* Check if register is dead or set in this insn */
++ if (dead_or_set_p (scan, reg))
++ {
++ add_only_used_as_pointer = true;
++ break;
++ }
++ }
++
++ if (add_only_used_as_pointer)
++ {
++ /* Lets delete the add insn and replace all memory references
++ which uses the pointer with the full expression. */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Deleting INSN %i since address expression can be folded into all "
++ "memory references using this expression\n",
++ INSN_UID (insn));
++ }
++ SET_INSN_DELETED (insn);
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++
++ for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
++ {
++ if (!INSN_P (scan))
++ continue;
++
++ if (!reg_mentioned_p (reg, PATTERN (scan)))
++ continue;
++
++ /* If used any other place than as a pointer or as the
++ destination register we failed */
++ if ((single_set (scan)
++ && GET_CODE (PATTERN (scan)) == SET
++ && ((MEM_P (SET_DEST (PATTERN (scan)))
++ && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
++ && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
++ REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
++ &&
++ REG_P (XEXP
++ (SET_SRC (PATTERN (scan)),
++ 0))
++ &&
++ REGNO (XEXP
++ (SET_SRC (PATTERN (scan)),
++ 0)) == REGNO (reg)))))
++ {
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Register %i replaced by indexed address in INSN %i\n",
++ REGNO (reg), INSN_UID (scan));
++ }
++ if (MEM_P (SET_DEST (PATTERN (scan))))
++ XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
++ else
++ XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
++ }
++
++ /* Check if register is dead or set in this insn */
++ if (dead_or_set_p (scan, reg))
++ {
++ break;
++ }
++
++ }
++ }
++ }
++ }
++
++
++ if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ {
++
++ /* Scan through all insns looking for conditional register to
++ register move operations */
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Folding redundant conditional move operations:\n");
++ }
++ for (insn = first; insn; insn = next_nonnote_insn (insn))
++ {
++ rtx src_reg, dst_reg, scan, test;
++
++ if (INSN_P (insn)
++ && GET_CODE (PATTERN (insn)) == COND_EXEC
++ && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET
++ && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn))))
++ && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn))))
++ && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn)))))
++ {
++ src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn)));
++ dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn)));
++ test = COND_EXEC_TEST (PATTERN (insn));
++ }
++ else
++ {
++ continue;
++ }
++
++ /* Scan backward through the rest of insns in this if-then or if-else
++ block and check if we can fold the move into another of the conditional
++ insns in the same block. */
++ scan = prev_nonnote_insn (insn);
++ while (INSN_P (scan)
++ && GET_CODE (PATTERN (scan)) == COND_EXEC
++ && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test))
++ {
++ rtx pattern = COND_EXEC_CODE (PATTERN (scan));
++ if ( GET_CODE (pattern) == PARALLEL )
++ pattern = XVECEXP (pattern, 0, 0);
++
++ if ( reg_set_p (src_reg, pattern) )
++ {
++ /* Fold in the destination register for the cond. move
++ into this insn. */
++ SET_DEST (pattern) = dst_reg;
++ if (dump_file)
++ {
++ fprintf (dump_file,
++ ";; Deleting INSN %i since this operation can be folded into INSN %i\n",
++ INSN_UID (insn), INSN_UID (scan));
++ }
++
++ /* Scan and check if any of the insns in between uses the src_reg. We
++ must then replace it with the dst_reg. */
++ while ( (scan = next_nonnote_insn (scan)) != insn ){
++ avr32_replace_reg (scan, src_reg, dst_reg);
++ }
++ /* Delete the insn. */
++ SET_INSN_DELETED (insn);
++
++ /* Force the instruction to be recognized again */
++ INSN_CODE (insn) = -1;
++ break;
++ }
++
++ /* If the destination register is used but not set in this insn
++ we cannot fold. */
++ if ( reg_mentioned_p (dst_reg, pattern) )
++ break;
++
++ scan = prev_nonnote_insn (scan);
++ }
++ }
++ }
++
++}
++
++
++/* Exported to toplev.c.
++
++ Do a final pass over the function, just before delayed branch
++ scheduling. */
++static void
++avr32_reorg (void)
++{
++ rtx insn;
++ HOST_WIDE_INT address = 0;
++ Mfix *fix;
++
++ minipool_fix_head = minipool_fix_tail = NULL;
++
++ /* The first insn must always be a note, or the code below won't scan it
++ properly. */
++ insn = get_insns ();
++ if (GET_CODE (insn) != NOTE)
++ abort ();
++
++ /* Scan all the insns and record the operands that will need fixing. */
++ for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
++ {
++ if (GET_CODE (insn) == BARRIER)
++ push_minipool_barrier (insn, address);
++ else if (INSN_P (insn))
++ {
++ rtx table;
++
++ note_invalid_constants (insn, address, true);
++ address += get_attr_length (insn);
++
++ /* If the insn is a vector jump, add the size of the table and skip
++ the table. */
++ if ((table = is_jump_table (insn)) != NULL)
++ {
++ address += get_jump_table_size (table);
++ insn = table;
++ }
++ }
++ }
++
++ fix = minipool_fix_head;
++
++ /* Now scan the fixups and perform the required changes. */
++ while (fix)
++ {
++ Mfix *ftmp;
++ Mfix *fdel;
++ Mfix *last_added_fix;
++ Mfix *last_barrier = NULL;
++ Mfix *this_fix;
++
++ /* Skip any further barriers before the next fix. */
++ while (fix && GET_CODE (fix->insn) == BARRIER)
++ fix = fix->next;
++
++ /* No more fixes. */
++ if (fix == NULL)
++ break;
++
++ last_added_fix = NULL;
++
++ for (ftmp = fix; ftmp; ftmp = ftmp->next)
++ {
++ if (GET_CODE (ftmp->insn) == BARRIER)
++ {
++ if (ftmp->address >= minipool_vector_head->max_address)
++ break;
++
++ last_barrier = ftmp;
++ }
++ else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
++ break;
++
++ last_added_fix = ftmp; /* Keep track of the last fix added.
++ */
++ }
++
++ /* If we found a barrier, drop back to that; any fixes that we could
++ have reached but come after the barrier will now go in the next
++ mini-pool. */
++ if (last_barrier != NULL)
++ {
++ /* Reduce the refcount for those fixes that won't go into this pool
++ after all. */
++ for (fdel = last_barrier->next;
++ fdel && fdel != ftmp; fdel = fdel->next)
++ {
++ fdel->minipool->refcount--;
++ fdel->minipool = NULL;
++ }
++
++ ftmp = last_barrier;
++ }
++ else
++ {
++ /* ftmp is first fix that we can't fit into this pool and there no
++ natural barriers that we could use. Insert a new barrier in the
++ code somewhere between the previous fix and this one, and
++ arrange to jump around it. */
++ HOST_WIDE_INT max_address;
++
++ /* The last item on the list of fixes must be a barrier, so we can
++ never run off the end of the list of fixes without last_barrier
++ being set. */
++ if (ftmp == NULL)
++ abort ();
++
++ max_address = minipool_vector_head->max_address;
++ /* Check that there isn't another fix that is in range that we
++ couldn't fit into this pool because the pool was already too
++ large: we need to put the pool before such an instruction. */
++ if (ftmp->address < max_address)
++ max_address = ftmp->address;
++
++ last_barrier = create_fix_barrier (last_added_fix, max_address);
++ }
++
++ assign_minipool_offsets (last_barrier);
++
++ while (ftmp)
++ {
++ if (GET_CODE (ftmp->insn) != BARRIER
++ && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
++ == NULL))
++ break;
++
++ ftmp = ftmp->next;
++ }
++
++ /* Scan over the fixes we have identified for this pool, fixing them up
++ and adding the constants to the pool itself. */
++ for (this_fix = fix; this_fix && ftmp != this_fix;
++ this_fix = this_fix->next)
++ if (GET_CODE (this_fix->insn) != BARRIER
++ /* Do nothing for entries present just to force the insertion of
++ a minipool. */
++ && !IS_FORCE_MINIPOOL (this_fix->value))
++ {
++ rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
++ minipool_vector_label),
++ this_fix->minipool->offset);
++ *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
++ }
++
++ dump_minipool (last_barrier->insn);
++ fix = ftmp;
++ }
++
++ /* Free the minipool memory. */
++ obstack_free (&minipool_obstack, minipool_startobj);
++
++ avr32_reorg_optimization ();
++}
++
++
++/* Hook for doing some final scanning of instructions. Does nothing yet...*/
++void
++avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
++ rtx * opvec ATTRIBUTE_UNUSED,
++ int noperands ATTRIBUTE_UNUSED)
++{
++ return;
++}
++
++
++/* Function for changing the condition on the next instruction,
++ should be used when emmiting compare instructions and
++ the condition of the next instruction needs to change.
++*/
++int
++set_next_insn_cond (rtx cur_insn, rtx new_cond)
++{
++ rtx next_insn = next_nonnote_insn (cur_insn);
++ if ((next_insn != NULL_RTX)
++ && (INSN_P (next_insn)))
++ {
++ if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
++ {
++ /* Branch instructions */
++ XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
++ /* Force the instruction to be recognized again */
++ INSN_CODE (next_insn) = -1;
++ return TRUE;
++ }
++ else if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
++ GET_MODE (SET_SRC (PATTERN (next_insn)))))
++ {
++ /* scc with no compare */
++ SET_SRC (PATTERN (next_insn)) = new_cond;
++ /* Force the instruction to be recognized again */
++ INSN_CODE (next_insn) = -1;
++ return TRUE;
++ }
++ else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
++ {
++ if ( GET_CODE (new_cond) == UNSPEC )
++ {
++ COND_EXEC_TEST (PATTERN (next_insn)) =
++ gen_rtx_UNSPEC (CCmode,
++ gen_rtvec (2,
++ XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0),
++ XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)),
++ XINT (new_cond, 1));
++ }
++ else
++ {
++ PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond));
++ }
++ }
++ }
++
++ return FALSE;
++}
++
++
++/* Function for obtaining the condition for the next instruction after cur_insn.
++*/
++rtx
++get_next_insn_cond (rtx cur_insn)
++{
++ rtx next_insn = next_nonnote_insn (cur_insn);
++ rtx cond = NULL_RTX;
++ if (next_insn != NULL_RTX
++ && INSN_P (next_insn))
++ {
++ if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
++ {
++ /* Branch and cond if then else instructions */
++ cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
++ }
++ else if ((GET_CODE (PATTERN (next_insn)) == SET)
++ && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
++ GET_MODE (SET_SRC (PATTERN (next_insn)))))
++ {
++ /* scc with no compare */
++ cond = SET_SRC (PATTERN (next_insn));
++ }
++ else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
++ {
++ cond = COND_EXEC_TEST (PATTERN (next_insn));
++ }
++ }
++ return cond;
++}
++
++
++/* Check if the next insn is a conditional insn that will emit a compare
++ for itself.
++*/
++rtx
++next_insn_emits_cmp (rtx cur_insn)
++{
++ rtx next_insn = next_nonnote_insn (cur_insn);
++ rtx cond = NULL_RTX;
++ if (next_insn != NULL_RTX
++ && INSN_P (next_insn))
++ {
++ if ( ((GET_CODE (PATTERN (next_insn)) == SET)
++ && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)
++ && (XEXP (XEXP (SET_SRC (PATTERN (next_insn)), 0),0) != cc0_rtx))
++ || GET_CODE (PATTERN (next_insn)) == COND_EXEC )
++ return TRUE;
++ }
++ return FALSE;
++}
++
++
++rtx
++avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
++{
++
++ rtx new_cond = NULL_RTX;
++ rtx ops[2];
++ rtx compare_pattern;
++ ops[0] = op0;
++ ops[1] = op1;
++
++ if ( GET_CODE (op0) == AND )
++ compare_pattern = op0;
++ else
++ compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
++
++ new_cond = is_compare_redundant (compare_pattern, cond);
++
++ if (new_cond != NULL_RTX)
++ return new_cond;
++
++ /* Check if we are inserting a bit-load instead of a compare. */
++ if ( GET_CODE (op0) == AND )
++ {
++ ops[0] = XEXP (op0, 0);
++ ops[1] = XEXP (op0, 1);
++ output_asm_insn ("bld\t%0, %p1", ops);
++ return cond;
++ }
++
++ /* Insert compare */
++ switch (mode)
++ {
++ case QImode:
++ output_asm_insn ("cp.b\t%0, %1", ops);
++ break;
++ case HImode:
++ output_asm_insn ("cp.h\t%0, %1", ops);
++ break;
++ case SImode:
++ output_asm_insn ("cp.w\t%0, %1", ops);
++ break;
++ case DImode:
++ if (GET_CODE (op1) != REG)
++ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
++ else
++ output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
++ break;
++ default:
++ internal_error ("Unknown comparison mode");
++ break;
++ }
++
++ return cond;
++}
++
++
++int
++avr32_load_multiple_operation (rtx op,
++ enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ int count = XVECLEN (op, 0);
++ unsigned int dest_regno;
++ rtx src_addr;
++ rtx elt;
++ int i = 1, base = 0;
++
++ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
++ return 0;
++
++ /* Check to see if this might be a write-back. */
++ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
++ {
++ i++;
++ base = 1;
++
++ /* Now check it more carefully. */
++ if (GET_CODE (SET_DEST (elt)) != REG
++ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
++ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
++ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
++ return 0;
++ }
++
++ /* Perform a quick check so we don't blow up below. */
++ if (count <= 1
++ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
++ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
++ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
++ return 0;
++
++ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
++ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
++
++ for (; i < count; i++)
++ {
++ elt = XVECEXP (op, 0, i);
++
++ if (GET_CODE (elt) != SET
++ || GET_CODE (SET_DEST (elt)) != REG
++ || GET_MODE (SET_DEST (elt)) != SImode
++ || GET_CODE (SET_SRC (elt)) != UNSPEC)
++ return 0;
++ }
++
++ return 1;
++}
++
++
++int
++avr32_store_multiple_operation (rtx op,
++ enum machine_mode mode ATTRIBUTE_UNUSED)
++{
++ int count = XVECLEN (op, 0);
++ int src_regno;
++ rtx dest_addr;
++ rtx elt;
++ int i = 1;
++
++ if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
++ return 0;
++
++ /* Perform a quick check so we don't blow up below. */
++ if (count <= i
++ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
++ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
++ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
++ return 0;
++
++ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
++ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
++
++ for (; i < count; i++)
++ {
++ elt = XVECEXP (op, 0, i);
++
++ if (GET_CODE (elt) != SET
++ || GET_CODE (SET_DEST (elt)) != MEM
++ || GET_MODE (SET_DEST (elt)) != SImode
++ || GET_CODE (SET_SRC (elt)) != UNSPEC)
++ return 0;
++ }
++
++ return 1;
++}
++
++
++int
++avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Check if they use the same accumulator */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++
++int
++avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
++{
++ /*
++ Check if the mul instruction produces the accumulator for the mac
++ instruction. */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++ return FALSE;
++}
++
++
++int
++avr32_store_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Only valid bypass if the output result is used as an src in the store
++ instruction, NOT if used as a pointer or base. */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++
++int
++avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Check if the register holding the result from the mul instruction is
++ used as a result register in the input instruction. */
++ if (rtx_equal_p
++ (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++
++int
++avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
++{
++ /* Check if the first loaded word in insn_out is used in insn_in. */
++ rtx dst_reg;
++ rtx second_loaded_reg;
++
++ /* If this is a double alu operation then the bypass is not valid */
++ if ((get_attr_type (insn_in) == TYPE_ALU
++ || get_attr_type (insn_in) == TYPE_ALU2)
++ && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
++ return FALSE;
++
++ /* Get the destination register in the load */
++ if (!REG_P (SET_DEST (PATTERN (insn_out))))
++ return FALSE;
++
++ dst_reg = SET_DEST (PATTERN (insn_out));
++ second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
++
++ if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
++ return TRUE;
++
++ return FALSE;
++}
++
++
++int
++avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
++{
++ /*
++ Check if the two first loaded word in insn_out are used in insn_in. */
++ rtx dst_reg;
++ rtx third_loaded_reg, fourth_loaded_reg;
++
++ /* Get the destination register in the load */
++ if (!REG_P (SET_DEST (PATTERN (insn_out))))
++ return FALSE;
++
++ dst_reg = SET_DEST (PATTERN (insn_out));
++ third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
++ fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
++
++ if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
++ && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
++ {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++
++rtx
++avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test )
++{
++ rtx branch_insn;
++ rtx cmp_test;
++ rtx compare_op0;
++ rtx compare_op1;
++
++
++ if ( !ce_info
++ || test == NULL_RTX
++ || !reg_mentioned_p (cc0_rtx, test))
++ return test;
++
++ branch_insn = BB_END (ce_info->test_bb);
++ cmp_test = PATTERN(prev_nonnote_insn (branch_insn));
++
++ if (GET_CODE(cmp_test) != SET
++ || !CC0_P(XEXP(cmp_test, 0)) )
++ return cmp_test;
++
++ if ( GET_CODE(SET_SRC(cmp_test)) == COMPARE ){
++ compare_op0 = XEXP(SET_SRC(cmp_test), 0);
++ compare_op1 = XEXP(SET_SRC(cmp_test), 1);
++ } else {
++ compare_op0 = SET_SRC(cmp_test);
++ compare_op1 = const0_rtx;
++ }
++
++ return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0),
++ compare_op0, compare_op1);
++}
++
++
++rtx
++avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
++ int *num_true_changes)
++{
++ rtx test = COND_EXEC_TEST(pattern);
++ rtx op = COND_EXEC_CODE(pattern);
++ rtx cmp_insn;
++ rtx cond_exec_insn;
++ int inputs_set_outside_ifblock = 1;
++ basic_block current_bb = BLOCK_FOR_INSN (insn);
++ rtx bb_insn ;
++ enum machine_mode mode = GET_MODE (XEXP (op, 0));
++
++ if (CC0_P(XEXP(test, 0)))
++ test = avr32_ifcvt_modify_test (ce_info,
++ test );
++
++ /* We do not support multiple tests. */
++ if ( ce_info
++ && ce_info->num_multiple_test_blocks > 0 )
++ return NULL_RTX;
++
++ pattern = gen_rtx_COND_EXEC (VOIDmode, test, op);
++
++ if ( !reload_completed )
++ {
++ rtx start;
++ int num_insns;
++ int max_insns = MAX_CONDITIONAL_EXECUTE;
++
++ if ( !ce_info )
++ return op;
++
++ /* Check if the insn is not suitable for conditional
++ execution. */
++ start_sequence ();
++ cond_exec_insn = emit_insn (pattern);
++ if ( recog_memoized (cond_exec_insn) < 0
++ && can_create_pseudo_p () )
++ {
++ /* Insn is not suitable for conditional execution, try
++ to fix it up by using an extra scratch register or
++ by pulling the operation outside the if-then-else
++ and then emiting a conditional move inside the if-then-else. */
++ end_sequence ();
++ if ( GET_CODE (op) != SET
++ || !REG_P (SET_DEST (op))
++ || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE
++ || GET_MODE_SIZE (mode) > UNITS_PER_WORD )
++ return NULL_RTX;
++
++ /* Check if any of the input operands to the insn is set inside the
++ current block. */
++ if ( current_bb->index == ce_info->then_bb->index )
++ start = PREV_INSN (BB_HEAD (ce_info->then_bb));
++ else
++ start = PREV_INSN (BB_HEAD (ce_info->else_bb));
++
++
++ for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) )
++ {
++ rtx set = single_set (bb_insn);
++
++ if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op)))
++ {
++ inputs_set_outside_ifblock = 0;
++ break;
++ }
++ }
++
++ cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb));
++
++
++ /* Check if we can insert more insns. */
++ num_insns = ( ce_info->num_then_insns +
++ ce_info->num_else_insns +
++ ce_info->num_cond_clobber_insns +
++ ce_info->num_extra_move_insns );
++
++ if ( ce_info->num_else_insns != 0 )
++ max_insns *=2;
++
++ if ( num_insns >= max_insns )
++ return NULL_RTX;
++
++ /* Check if we have an instruction which might be converted to
++ conditional form if we give it a scratch register to clobber. */
++ {
++ rtx clobber_insn;
++ rtx scratch_reg = gen_reg_rtx (mode);
++ rtx new_pattern = copy_rtx (pattern);
++ rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern));
++
++ rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg);
++ rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber };
++ COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec));
++
++ start_sequence ();
++ clobber_insn = emit_insn (new_pattern);
++
++ if ( recog_memoized (clobber_insn) >= 0
++ && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2
++ && CONST_INT_P (XEXP (set_src, 1))
++ && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") )
++ || !ce_info->else_bb
++ || current_bb->index == ce_info->else_bb->index ))
++ {
++ end_sequence ();
++ /* Force the insn to be recognized again. */
++ INSN_CODE (insn) = -1;
++
++ /* If this is the first change in this IF-block then
++ signal that we have made a change. */
++ if ( ce_info->num_cond_clobber_insns == 0
++ && ce_info->num_extra_move_insns == 0 )
++ *num_true_changes += 1;
++
++ ce_info->num_cond_clobber_insns++;
++
++ if (dump_file)
++ fprintf (dump_file,
++ "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n",
++ INSN_UID (insn));
++
++ return COND_EXEC_CODE (new_pattern);
++ }
++ end_sequence ();
++ }
++
++ if ( inputs_set_outside_ifblock )
++ {
++ /* Check if the insn before the cmp is an and which used
++ together with the cmp can be optimized into a bld. If
++ so then we should try to put the insn before the and
++ so that we can catch the bld peephole. */
++ rtx set;
++ rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn);
++ if (insn_before_cmp_insn
++ && (set = single_set (insn_before_cmp_insn))
++ && GET_CODE (SET_SRC (set)) == AND
++ && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode)
++ /* Also make sure that the insn does not set any
++ of the input operands to the insn we are pulling out. */
++ && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) )
++ cmp_insn = prev_nonnote_insn (cmp_insn);
++
++ /* We can try to put the operation outside the if-then-else
++ blocks and insert a move. */
++ if ( !insn_invalid_p (insn)
++ /* Do not allow conditional insns to be moved outside the
++ if-then-else. */
++ && !reg_mentioned_p (cc0_rtx, insn)
++ /* We cannot move memory loads outside of the if-then-else
++ since the memory access should not be perfomed if the
++ condition is not met. */
++ && !mem_mentioned_p (SET_SRC (op)) )
++ {
++ rtx scratch_reg = gen_reg_rtx (mode);
++ rtx op_pattern = copy_rtx (op);
++ rtx new_insn, seq;
++ rtx link, prev_link;
++ op = copy_rtx (op);
++ /* Emit the operation to a temp reg before the compare,
++ and emit a move inside the if-then-else, hoping that the
++ whole if-then-else can be converted to conditional
++ execution. */
++ SET_DEST (op_pattern) = scratch_reg;
++ start_sequence ();
++ new_insn = emit_insn (op_pattern);
++ seq = get_insns();
++ end_sequence ();
++
++ /* Check again that the insn is valid. For some insns the insn might
++ become invalid if the destination register is changed. Ie. for mulacc
++ operations. */
++ if ( insn_invalid_p (new_insn) )
++ return NULL_RTX;
++
++ emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn));
++
++ if (dump_file)
++ fprintf (dump_file,
++ "\nMoving INSN %d out of IF-block by adding INSN %d...\n",
++ INSN_UID (insn), INSN_UID (new_insn));
++
++ ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn;
++ ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn;
++ XEXP (op, 1) = scratch_reg;
++ /* Force the insn to be recognized again. */
++ INSN_CODE (insn) = -1;
++
++ /* Move REG_DEAD notes to the moved insn. */
++ prev_link = NULL_RTX;
++ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
++ {
++ if (REG_NOTE_KIND (link) == REG_DEAD)
++ {
++ /* Add the REG_DEAD note to the new insn. */
++ rtx dead_reg = XEXP (link, 0);
++ REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn));
++ /* Remove the REG_DEAD note from the insn we convert to a move. */
++ if ( prev_link )
++ XEXP (prev_link, 1) = XEXP (link, 1);
++ else
++ REG_NOTES (insn) = XEXP (link, 1);
++ }
++ else
++ {
++ prev_link = link;
++ }
++ }
++ /* Add a REG_DEAD note to signal that the scratch register is dead. */
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn));
++
++ /* If this is the first change in this IF-block then
++ signal that we have made a change. */
++ if ( ce_info->num_cond_clobber_insns == 0
++ && ce_info->num_extra_move_insns == 0 )
++ *num_true_changes += 1;
++
++ ce_info->num_extra_move_insns++;
++ return op;
++ }
++ }
++
++ /* We failed to fixup the insns, so this if-then-else can not be made
++ conditional. Just return NULL_RTX so that the if-then-else conversion
++ for this if-then-else will be cancelled. */
++ return NULL_RTX;
++ }
++ end_sequence ();
++ return op;
++ }
++
++ /* Signal that we have started if conversion after reload, which means
++ that it should be safe to split all the predicable clobber insns which
++ did not become cond_exec back into a simpler form if possible. */
++ cfun->machine->ifcvt_after_reload = 1;
++
++ return pattern;
++}
++
++
++void
++avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes)
++{
++ int n;
++
++ if ( ce_info->num_extra_move_insns > 0
++ && ce_info->num_cond_clobber_insns == 0)
++ /* Signal that we did not do any changes after all. */
++ *num_true_changes -= 1;
++
++ /* Remove any inserted move insns. */
++ for ( n = 0; n < ce_info->num_extra_move_insns; n++ )
++ {
++ rtx link, prev_link;
++
++ /* Remove REG_DEAD note since we are not needing the scratch register anyway. */
++ prev_link = NULL_RTX;
++ for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1))
++ {
++ if (REG_NOTE_KIND (link) == REG_DEAD)
++ {
++ if ( prev_link )
++ XEXP (prev_link, 1) = XEXP (link, 1);
++ else
++ REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1);
++ }
++ else
++ {
++ prev_link = link;
++ }
++ }
++
++ /* Revert all reg_notes for the moved insn. */
++ for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1))
++ {
++ REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
++ XEXP (link, 0),
++ REG_NOTES (ce_info->extra_move_insns[n]));
++ }
++
++ /* Remove the moved insn. */
++ remove_insn ( ce_info->moved_insns[n] );
++ }
++}
++
++
++/* Function returning TRUE if INSN with OPERANDS is a splittable
++ conditional immediate clobber insn. We assume that the insn is
++ already a conditional immediate clobber insns and do not check
++ for that. */
++int
++avr32_cond_imm_clobber_splittable (rtx insn, rtx operands[])
++{
++ if ( REGNO (operands[0]) == REGNO (operands[1]) )
++ {
++ if ( (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is21"))
++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21")))
++ return FALSE;
++ }
++ else if ( (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode)
++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16"))
++ || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
++ && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) )
++ return FALSE;
++
++ return TRUE;
++}
++
++
++/* Function for getting an integer value from a const_int or const_double
++ expression regardless of the HOST_WIDE_INT size. Each target cpu word
++ will be put into the val array where the LSW will be stored at the lowest
++ address and so forth. Assumes that const_expr is either a const_int or
++ const_double. Only valid for modes which have sizes that are a multiple
++ of the word size.
++*/
++void
++avr32_get_intval (enum machine_mode mode, rtx const_expr, HOST_WIDE_INT *val)
++{
++ int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
++ const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
++
++ if ( GET_CODE(const_expr) == CONST_DOUBLE ){
++ HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr);
++ HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr);
++ /* Evaluate hi and lo values of const_double. */
++ avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
++ GEN_INT (lo),
++ &val[0]);
++ avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
++ GEN_INT (hi),
++ &val[words_in_const_int]);
++ } else if ( GET_CODE(const_expr) == CONST_INT ){
++ HOST_WIDE_INT value = INTVAL(const_expr);
++ int word;
++ for ( word = 0; (word < words_in_mode) && (word < words_in_const_int); word++ ){
++ /* Shift word up to the MSW and shift down again to extract the
++ word and sign-extend. */
++ int lshift = (words_in_const_int - word - 1) * BITS_PER_WORD;
++ int rshift = (words_in_const_int-1) * BITS_PER_WORD;
++ val[word] = (value << lshift) >> rshift;
++ }
++
++ for ( ; word < words_in_mode; word++ ){
++ /* Just put the sign bits in the remaining words. */
++ val[word] = value < 0 ? -1 : 0;
++ }
++ }
++}
++
++
++void
++avr32_split_const_expr (enum machine_mode mode, enum machine_mode new_mode,
++ rtx expr, rtx *split_expr)
++{
++ int i, word;
++ int words_in_intval = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
++ int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD;
++ const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
++ HOST_WIDE_INT *val = alloca (words_in_intval * UNITS_PER_WORD);
++
++ avr32_get_intval (mode, expr, val);
++
++ for ( i=0; i < (words_in_intval/words_in_split_values); i++ )
++ {
++ HOST_WIDE_INT value_lo = 0, value_hi = 0;
++ for ( word = 0; word < words_in_split_values; word++ )
++ {
++ if ( word >= words_in_const_int )
++ value_hi |= ((val[i * words_in_split_values + word] &
++ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
++ << (BITS_PER_WORD * (word - words_in_const_int)));
++ else
++ value_lo |= ((val[i * words_in_split_values + word] &
++ (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
++ << (BITS_PER_WORD * word));
++ }
++ split_expr[i] = immed_double_const(value_lo, value_hi, new_mode);
++ }
++}
++
++
++/* Set up library functions to comply to AVR32 ABI */
++static void
++avr32_init_libfuncs (void)
++{
++ /* Convert gcc run-time function names to AVR32 ABI names */
++
++ /* Double-precision floating-point arithmetic. */
++ set_optab_libfunc (neg_optab, DFmode, NULL);
++
++ /* Double-precision comparisons. */
++ set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
++ set_optab_libfunc (ne_optab, DFmode, NULL);
++ set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
++ set_optab_libfunc (le_optab, DFmode, NULL);
++ set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
++ set_optab_libfunc (gt_optab, DFmode, NULL);
++
++ /* Single-precision floating-point arithmetic. */
++ set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
++ set_optab_libfunc (neg_optab, SFmode, NULL);
++
++ /* Single-precision comparisons. */
++ set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
++ set_optab_libfunc (ne_optab, SFmode, NULL);
++ set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
++ set_optab_libfunc (le_optab, SFmode, NULL);
++ set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
++ set_optab_libfunc (gt_optab, SFmode, NULL);
++
++ /* Floating-point to integer conversions. */
++ set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
++ set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
++ set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
++ set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
++ set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
++ set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
++ set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
++ set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
++
++ /* Conversions between floating types. */
++ set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
++ set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
++
++ /* Integer to floating-point conversions. Table 8. */
++ set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
++ set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
++ set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
++ set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
++ set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
++ set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
++ /* TODO: Add these to gcc library functions */
++ //set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
++ //set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
++
++ /* Long long. Table 9. */
++ set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
++ set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
++ set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
++ set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
++ set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
++ set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
++ set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
++ set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
++
++ /* Floating point library functions which have fast versions. */
++ if ( TARGET_FAST_FLOAT )
++ {
++ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast");
++ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast");
++ set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast");
++ set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast");
++ set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast");
++ set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub_fast");
++ set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div_fast");
++ }
++ else
++ {
++ set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
++ set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
++ set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
++ set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
++ set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
++ set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
++ set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
++ }
++}
++
++
++/* Record a flashvault declaration. */
++static void
++flashvault_decl_list_add (unsigned int vector_num, const char *name)
++{
++ struct flashvault_decl_list *p;
++
++ p = (struct flashvault_decl_list *)
++ xmalloc (sizeof (struct flashvault_decl_list));
++ p->next = flashvault_decl_list_head;
++ p->name = name;
++ p->vector_num = vector_num;
++ flashvault_decl_list_head = p;
++}
++
++
++static void
++avr32_file_end (void)
++{
++ struct flashvault_decl_list *p;
++ unsigned int num_entries = 0;
++
++ /* Check if a list of flashvault declarations exists. */
++ if (flashvault_decl_list_head != NULL)
++ {
++ /* Calculate the number of entries in the table. */
++ for (p = flashvault_decl_list_head; p != NULL; p = p->next)
++ {
++ num_entries++;
++ }
++
++ /* Generate the beginning of the flashvault data table. */
++ fputs ("\t.global __fv_table\n"
++ "\t.data\n"
++ "\t.align 2\n"
++ "\t.set .LFVTABLE, . + 0\n"
++ "\t.type __fv_table, @object\n", asm_out_file);
++ /* Each table entry is 8 bytes. */
++ fprintf (asm_out_file, "\t.size __fv_table, %u\n", (num_entries * 8));
++
++ fputs("__fv_table:\n", asm_out_file);
++
++ for (p = flashvault_decl_list_head; p != NULL; p = p->next)
++ {
++ /* Output table entry. */
++ fprintf (asm_out_file,
++ "\t.align 2\n"
++ "\t.int %u\n", p->vector_num);
++ fprintf (asm_out_file,
++ "\t.align 2\n"
++ "\t.int %s\n", p->name);
++ }
++ }
++}
+--- /dev/null
++++ b/gcc/config/avr32/avr32-elf.h
+@@ -0,0 +1,91 @@
++/*
++ Elf specific definitions.
++ Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++
++ This file is part of GCC.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++
++/*****************************************************************************
++ * Controlling the Compiler Driver, 'gcc'
++ *****************************************************************************/
++
++/* Run-time Target Specification. */
++#undef TARGET_VERSION
++#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr);
++
++/*
++Another C string constant used much like LINK_SPEC. The
++difference between the two is that STARTFILE_SPEC is used at
++the very beginning of the command given to the linker.
++
++If this macro is not defined, a default is provided that loads the
++standard C startup file from the usual place. See gcc.c.
++*/
++#if 0
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
++#endif
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC "%{mflashvault: crtfv.o%s} %{!mflashvault: crt0.o%s} \
++ crti.o%s crtbegin.o%s"
++
++#undef LINK_SPEC
++#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}"
++
++
++/*
++Another C string constant used much like LINK_SPEC. The
++difference between the two is that ENDFILE_SPEC is used at
++the very end of the command given to the linker.
++
++Do not define this macro if it does not need to do anything.
++*/
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
++
++
++/* Target CPU builtins. */
++#define TARGET_CPU_CPP_BUILTINS() \
++ do \
++ { \
++ builtin_define ("__avr32__"); \
++ builtin_define ("__AVR32__"); \
++ builtin_define ("__AVR32_ELF__"); \
++ builtin_define (avr32_part->macro); \
++ builtin_define (avr32_arch->macro); \
++ if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
++ builtin_define ("__AVR32_AVR32A__"); \
++ else \
++ builtin_define ("__AVR32_AVR32B__"); \
++ if (TARGET_UNALIGNED_WORD) \
++ builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
++ if (TARGET_SIMD) \
++ builtin_define ("__AVR32_HAS_SIMD__"); \
++ if (TARGET_DSP) \
++ builtin_define ("__AVR32_HAS_DSP__"); \
++ if (TARGET_RMW) \
++ builtin_define ("__AVR32_HAS_RMW__"); \
++ if (TARGET_BRANCH_PRED) \
++ builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
++ if (TARGET_FAST_FLOAT) \
++ builtin_define ("__AVR32_FAST_FLOAT__"); \
++ if (TARGET_FLASHVAULT) \
++ builtin_define ("__AVR32_FLASHVAULT__"); \
++ if (TARGET_NO_MUL_INSNS) \
++ builtin_define ("__AVR32_NO_MUL__"); \
++ } \
++ while (0)
+--- /dev/null
++++ b/gcc/config/avr32/avr32.h
+@@ -0,0 +1,3316 @@
++/*
++ Definitions of target machine for AVR32.
++ Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
++
++ This file is part of GCC.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++#ifndef GCC_AVR32_H
++#define GCC_AVR32_H
++
++
++#ifndef OBJECT_FORMAT_ELF
++#error avr32.h included before elfos.h
++#endif
++
++#ifndef LOCAL_LABEL_PREFIX
++#define LOCAL_LABEL_PREFIX "."
++#endif
++
++#ifndef SUBTARGET_CPP_SPEC
++#define SUBTARGET_CPP_SPEC "-D__ELF__"
++#endif
++
++
++extern struct rtx_def *avr32_compare_op0;
++extern struct rtx_def *avr32_compare_op1;
++
++/* comparison type */
++enum avr32_cmp_type {
++ CMP_QI, /* 1 byte ->char */
++ CMP_HI, /* 2 byte->half word */
++ CMP_SI, /* four byte->word*/
++ CMP_DI, /* eight byte->double word */
++ CMP_SF, /* single precision floats */
++ CMP_MAX /* max comparison type */
++};
++
++extern enum avr32_cmp_type avr32_branch_type; /* type of branch to use */
++
++
++extern struct rtx_def *avr32_acc_cache;
++
++/* cache instruction op5 codes */
++#define AVR32_CACHE_INVALIDATE_ICACHE 1
++
++/*
++These bits describe the different types of function supported by the AVR32
++backend. They are exclusive, e.g. a function cannot be both a normal function
++and an interworked function. Knowing the type of a function is important for
++determining its prologue and epilogue sequences. Note value 7 is currently
++unassigned. Also note that the interrupt function types all have bit 2 set,
++so that they can be tested for easily. Note that 0 is deliberately chosen for
++AVR32_FT_UNKNOWN so that when the machine_function structure is initialized
++(to zero) func_type will default to unknown. This will force the first use of
++avr32_current_func_type to call avr32_compute_func_type.
++*/
++#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined. */
++#define AVR32_FT_NORMAL 1 /* Normal function. */
++#define AVR32_FT_ACALL 2 /* An acall function. */
++#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */
++#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */
++#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */
++#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */
++
++#define AVR32_FT_TYPE_MASK ((1 << 3) - 1)
++
++/* In addition functions can have several type modifiers, outlined by these bit masks: */
++#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR and above. */
++#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */
++#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */
++#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another func. */
++#define AVR32_FT_FLASHVAULT (1 << 6) /* Flashvault function call. */
++#define AVR32_FT_FLASHVAULT_IMPL (1 << 7) /* Function definition in FlashVault. */
++
++
++/* Some macros to test these flags. */
++#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK)
++#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT)
++#define IS_NAKED(t) (t & AVR32_FT_NAKED)
++#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE)
++#define IS_NESTED(t) (t & AVR32_FT_NESTED)
++#define IS_FLASHVAULT(t) (t & AVR32_FT_FLASHVAULT)
++#define IS_FLASHVAULT_IMPL(t) (t & AVR32_FT_FLASHVAULT_IMPL)
++
++#define SYMBOL_FLAG_RMW_ADDR_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
++#define SYMBOL_REF_RMW_ADDR(RTX) \
++ ((SYMBOL_REF_FLAGS (RTX) & (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT)) != 0)
++
++
++typedef struct minipool_labels
++GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
++{
++ rtx label;
++ struct minipool_labels *prev;
++ struct minipool_labels *next;
++} minipool_labels;
++
++/* A C structure for machine-specific, per-function data.
++ This is added to the cfun structure. */
++
++typedef struct machine_function
++GTY (())
++{
++ /* Records the type of the current function. */
++ unsigned long func_type;
++ /* List of minipool labels, use for checking if code label is valid in a
++ memory expression */
++ minipool_labels *minipool_label_head;
++ minipool_labels *minipool_label_tail;
++ int ifcvt_after_reload;
++} machine_function;
++
++/* Initialize data used by insn expanders. This is called from insn_emit,
++ once for every function before code is generated. */
++#define INIT_EXPANDERS avr32_init_expanders ()
++
++/******************************************************************************
++ * SPECS
++ *****************************************************************************/
++
++#ifndef ASM_SPEC
++#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}"
++#endif
++
++#ifndef MULTILIB_DEFAULTS
++#define MULTILIB_DEFAULTS { "march=ap", "" }
++#endif
++
++/******************************************************************************
++ * Run-time Target Specification
++ *****************************************************************************/
++#ifndef TARGET_VERSION
++#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
++#endif
++
++
++/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
++enum part_type
++{
++ PART_TYPE_AVR32_NONE,
++ PART_TYPE_AVR32_AP7000,
++ PART_TYPE_AVR32_AP7001,
++ PART_TYPE_AVR32_AP7002,
++ PART_TYPE_AVR32_AP7200,
++ PART_TYPE_AVR32_UC3A0128,
++ PART_TYPE_AVR32_UC3A0256,
++ PART_TYPE_AVR32_UC3A0512,
++ PART_TYPE_AVR32_UC3A0512ES,
++ PART_TYPE_AVR32_UC3A1128,
++ PART_TYPE_AVR32_UC3A1256,
++ PART_TYPE_AVR32_UC3A1512,
++ PART_TYPE_AVR32_UC3A1512ES,
++ PART_TYPE_AVR32_UC3A3REVD,
++ PART_TYPE_AVR32_UC3A364,
++ PART_TYPE_AVR32_UC3A364S,
++ PART_TYPE_AVR32_UC3A3128,
++ PART_TYPE_AVR32_UC3A3128S,
++ PART_TYPE_AVR32_UC3A3256,
++ PART_TYPE_AVR32_UC3A3256S,
++ PART_TYPE_AVR32_UC3A464,
++ PART_TYPE_AVR32_UC3A464S,
++ PART_TYPE_AVR32_UC3A4128,
++ PART_TYPE_AVR32_UC3A4128S,
++ PART_TYPE_AVR32_UC3A4256,
++ PART_TYPE_AVR32_UC3A4256S,
++ PART_TYPE_AVR32_UC3B064,
++ PART_TYPE_AVR32_UC3B0128,
++ PART_TYPE_AVR32_UC3B0256,
++ PART_TYPE_AVR32_UC3B0256ES,
++ PART_TYPE_AVR32_UC3B0512,
++ PART_TYPE_AVR32_UC3B0512REVC,
++ PART_TYPE_AVR32_UC3B164,
++ PART_TYPE_AVR32_UC3B1128,
++ PART_TYPE_AVR32_UC3B1256,
++ PART_TYPE_AVR32_UC3B1256ES,
++ PART_TYPE_AVR32_UC3B1512,
++ PART_TYPE_AVR32_UC3B1512REVC,
++ PART_TYPE_AVR32_UC64D3,
++ PART_TYPE_AVR32_UC128D3,
++ PART_TYPE_AVR32_UC64D4,
++ PART_TYPE_AVR32_UC128D4,
++ PART_TYPE_AVR32_UC3C0512CREVC,
++ PART_TYPE_AVR32_UC3C1512CREVC,
++ PART_TYPE_AVR32_UC3C2512CREVC,
++ PART_TYPE_AVR32_UC3L0256,
++ PART_TYPE_AVR32_UC3L0128,
++ PART_TYPE_AVR32_UC3L064,
++ PART_TYPE_AVR32_UC3L032,
++ PART_TYPE_AVR32_UC3L016,
++ PART_TYPE_AVR32_UC3L064REVB,
++ PART_TYPE_AVR32_UC64L3U,
++ PART_TYPE_AVR32_UC128L3U,
++ PART_TYPE_AVR32_UC256L3U,
++ PART_TYPE_AVR32_UC64L4U,
++ PART_TYPE_AVR32_UC128L4U,
++ PART_TYPE_AVR32_UC256L4U,
++ PART_TYPE_AVR32_UC3C064C,
++ PART_TYPE_AVR32_UC3C0128C,
++ PART_TYPE_AVR32_UC3C0256C,
++ PART_TYPE_AVR32_UC3C0512C,
++ PART_TYPE_AVR32_UC3C164C,
++ PART_TYPE_AVR32_UC3C1128C,
++ PART_TYPE_AVR32_UC3C1256C,
++ PART_TYPE_AVR32_UC3C1512C,
++ PART_TYPE_AVR32_UC3C264C,
++ PART_TYPE_AVR32_UC3C2128C,
++ PART_TYPE_AVR32_UC3C2256C,
++ PART_TYPE_AVR32_UC3C2512C,
++ PART_TYPE_AVR32_MXT768E
++};
++
++/* Microarchitectures. */
++enum microarchitecture_type
++{
++ UARCH_TYPE_AVR32A,
++ UARCH_TYPE_AVR32B,
++ UARCH_TYPE_NONE
++};
++
++/* Architectures types which specifies the pipeline.
++ Keep this in sync with avr32_arch_types in avr32.c
++ and the pipeline attribute in avr32.md */
++enum architecture_type
++{
++ ARCH_TYPE_AVR32_AP,
++ ARCH_TYPE_AVR32_UCR1,
++ ARCH_TYPE_AVR32_UCR2,
++ ARCH_TYPE_AVR32_UCR2NOMUL,
++ ARCH_TYPE_AVR32_UCR3,
++ ARCH_TYPE_AVR32_UCR3FP,
++ ARCH_TYPE_AVR32_NONE
++};
++
++/* Flag specifying if the cpu has support for DSP instructions.*/
++#define FLAG_AVR32_HAS_DSP (1 << 0)
++/* Flag specifying if the cpu has support for Read-Modify-Write
++ instructions.*/
++#define FLAG_AVR32_HAS_RMW (1 << 1)
++/* Flag specifying if the cpu has support for SIMD instructions. */
++#define FLAG_AVR32_HAS_SIMD (1 << 2)
++/* Flag specifying if the cpu has support for unaligned memory word access. */
++#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
++/* Flag specifying if the cpu has support for branch prediction. */
++#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
++/* Flag specifying if the cpu has support for a return stack. */
++#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5)
++/* Flag specifying if the cpu has caches. */
++#define FLAG_AVR32_HAS_CACHES (1 << 6)
++/* Flag specifying if the cpu has support for v2 insns. */
++#define FLAG_AVR32_HAS_V2_INSNS (1 << 7)
++/* Flag specifying that the cpu has buggy mul insns. */
++#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8)
++/* Flag specifying that the device has FPU instructions according
++ to AVR32002 specifications*/
++#define FLAG_AVR32_HAS_FPU (1 << 9)
++
++/* Structure for holding information about different avr32 CPUs/parts */
++struct part_type_s
++{
++ const char *const name;
++ enum part_type part_type;
++ enum architecture_type arch_type;
++ /* Must lie outside user's namespace. NULL == no macro. */
++ const char *const macro;
++};
++
++/* Structure for holding information about different avr32 pipeline
++ architectures. */
++struct arch_type_s
++{
++ const char *const name;
++ enum architecture_type arch_type;
++ enum microarchitecture_type uarch_type;
++ const unsigned long feature_flags;
++ /* Must lie outside user's namespace. NULL == no macro. */
++ const char *const macro;
++};
++
++extern const struct part_type_s *avr32_part;
++extern const struct arch_type_s *avr32_arch;
++
++#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
++#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
++#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
++#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
++#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
++#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK)
++#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS)
++#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES)
++#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS)
++#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP)
++#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1)
++#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2)
++#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2)
++#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
++#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B)
++#define TARGET_ARCH_FPU (avr32_arch->feature_flags & FLAG_AVR32_HAS_FPU)
++
++#define CAN_DEBUG_WITHOUT_FP
++
++
++
++
++/******************************************************************************
++ * Storage Layout
++ *****************************************************************************/
++
++/*
++Define this macro to have the value 1 if the most significant bit in a
++byte has the lowest number; otherwise define it to have the value zero.
++This means that bit-field instructions count from the most significant
++bit. If the machine has no bit-field instructions, then this must still
++be defined, but it doesn't matter which value it is defined to. This
++macro need not be a constant.
++
++This macro does not affect the way structure fields are packed into
++bytes or words; that is controlled by BYTES_BIG_ENDIAN.
++*/
++#define BITS_BIG_ENDIAN 0
++
++/*
++Define this macro to have the value 1 if the most significant byte in a
++word has the lowest number. This macro need not be a constant.
++*/
++/*
++ Data is stored in a big-endian way.
++*/
++#define BYTES_BIG_ENDIAN 1
++
++/*
++Define this macro to have the value 1 if, in a multiword object, the
++most significant word has the lowest number. This applies to both
++memory locations and registers; GCC fundamentally assumes that the
++order of words in memory is the same as the order in registers. This
++macro need not be a constant.
++*/
++/*
++ Data is stored in a big-endian way.
++*/
++#define WORDS_BIG_ENDIAN 1
++
++/*
++Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a
++constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
++used only when compiling libgcc2.c. Typically the value will be set
++based on preprocessor defines.
++*/
++#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
++
++/*
++Define this macro to have the value 1 if DFmode, XFmode or
++TFmode floating point numbers are stored in memory with the word
++containing the sign bit at the lowest address; otherwise define it to
++have the value 0. This macro need not be a constant.
++
++You need not define this macro if the ordering is the same as for
++multi-word integers.
++*/
++/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
++
++/*
++Define this macro to be the number of bits in an addressable storage
++unit (byte); normally 8.
++*/
++#define BITS_PER_UNIT 8
++
++/*
++Number of bits in a word; normally 32.
++*/
++#define BITS_PER_WORD 32
++
++/*
++Maximum number of bits in a word. If this is undefined, the default is
++BITS_PER_WORD. Otherwise, it is the constant value that is the
++largest value that BITS_PER_WORD can have at run-time.
++*/
++/* MAX_BITS_PER_WORD not defined*/
++
++/*
++Number of storage units in a word; normally 4.
++*/
++#define UNITS_PER_WORD 4
++
++/*
++Minimum number of units in a word. If this is undefined, the default is
++UNITS_PER_WORD. Otherwise, it is the constant value that is the
++smallest value that UNITS_PER_WORD can have at run-time.
++*/
++/* MIN_UNITS_PER_WORD not defined */
++
++/*
++Width of a pointer, in bits. You must specify a value no wider than the
++width of Pmode. If it is not equal to the width of Pmode,
++you must define POINTERS_EXTEND_UNSIGNED.
++*/
++#define POINTER_SIZE 32
++
++/*
++A C expression whose value is greater than zero if pointers that need to be
++extended from being POINTER_SIZE bits wide to Pmode are to
++be zero-extended and zero if they are to be sign-extended. If the value
++is less than zero then there must be a "ptr_extend" instruction that
++extends a pointer from POINTER_SIZE to Pmode.
++
++You need not define this macro if the POINTER_SIZE is equal
++to the width of Pmode.
++*/
++/* #define POINTERS_EXTEND_UNSIGNED */
++
++/*
++A Macro to update M and UNSIGNEDP when an object whose type
++is TYPE and which has the specified mode and signedness is to be
++stored in a register. This macro is only called when TYPE is a
++scalar type.
++
++On most RISC machines, which only have operations that operate on a full
++register, define this macro to set M to word_mode if
++M is an integer mode narrower than BITS_PER_WORD. In most
++cases, only integer modes should be widened because wider-precision
++floating-point operations are usually more expensive than their narrower
++counterparts.
++
++For most machines, the macro definition does not change UNSIGNEDP.
++However, some machines have instructions that preferentially handle
++either signed or unsigned quantities of certain modes. For example, on
++the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
++sign-extend the result to 64 bits. On such machines, set
++UNSIGNEDP according to which kind of extension is more efficient.
++
++Do not define this macro if it would never modify M.
++*/
++#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \
++ { \
++ if (!AGGREGATE_TYPE_P (TYPE) \
++ && GET_MODE_CLASS (mode) == MODE_INT \
++ && GET_MODE_SIZE (mode) < 4) \
++ { \
++ if (M == QImode) \
++ (UNSIGNEDP) = 1; \
++ else if (M == HImode) \
++ (UNSIGNEDP) = 0; \
++ (M) = SImode; \
++ } \
++ }
++
++#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \
++ PROMOTE_MODE(M, UNSIGNEDP, TYPE)
++
++/* Define if operations between registers always perform the operation
++ on the full register even if a narrower mode is specified. */
++#define WORD_REGISTER_OPERATIONS
++
++/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
++ will either zero-extend or sign-extend. The value of this macro should
++ be the code that says which one of the two operations is implicitly
++ done, UNKNOWN if not known. */
++#define LOAD_EXTEND_OP(MODE) \
++ (((MODE) == QImode) ? ZERO_EXTEND \
++ : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
++
++
++/*
++Normal alignment required for function parameters on the stack, in
++bits. All stack parameters receive at least this much alignment
++regardless of data type. On most machines, this is the same as the
++size of an integer.
++*/
++#define PARM_BOUNDARY 32
++
++/*
++Define this macro to the minimum alignment enforced by hardware for the
++stack pointer on this machine. The definition is a C expression for the
++desired alignment (measured in bits). This value is used as a default
++if PREFERRED_STACK_BOUNDARY is not defined. On most machines,
++this should be the same as PARM_BOUNDARY.
++*/
++#define STACK_BOUNDARY 32
++
++/*
++Define this macro if you wish to preserve a certain alignment for the
++stack pointer, greater than what the hardware enforces. The definition
++is a C expression for the desired alignment (measured in bits). This
++macro must evaluate to a value equal to or larger than
++STACK_BOUNDARY.
++*/
++#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
++
++/*
++Alignment required for a function entry point, in bits.
++*/
++#define FUNCTION_BOUNDARY 16
++
++/*
++Biggest alignment that any data type can require on this machine, in bits.
++*/
++#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
++
++/*
++If defined, the smallest alignment, in bits, that can be given to an
++object that can be referenced in one operation, without disturbing any
++nearby object. Normally, this is BITS_PER_UNIT, but may be larger
++on machines that don't have byte or half-word store operations.
++*/
++#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
++
++
++/*
++An integer expression for the size in bits of the largest integer machine mode that
++should actually be used. All integer machine modes of this size or smaller can be
++used for structures and unions with the appropriate sizes. If this macro is undefined,
++GET_MODE_BITSIZE (DImode) is assumed.*/
++#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
++
++
++/*
++If defined, a C expression to compute the alignment given to a constant
++that is being placed in memory. CONSTANT is the constant and
++BASIC_ALIGN is the alignment that the object would ordinarily
++have. The value of this macro is used instead of that alignment to
++align the object.
++
++If this macro is not defined, then BASIC_ALIGN is used.
++
++The typical use of this macro is to increase alignment for string
++constants to be word aligned so that strcpy calls that copy
++constants can be done inline.
++*/
++#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
++ ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
++
++/* Try to align string to a word. */
++#define DATA_ALIGNMENT(TYPE, ALIGN) \
++ ({(TREE_CODE (TYPE) == ARRAY_TYPE \
++ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
++
++/* Try to align local store strings to a word. */
++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
++ ({(TREE_CODE (TYPE) == ARRAY_TYPE \
++ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
++
++/*
++Define this macro to be the value 1 if instructions will fail to work
++if given data not on the nominal alignment. If instructions will merely
++go slower in that case, define this macro as 0.
++*/
++#define STRICT_ALIGNMENT 1
++
++/*
++Define this if you wish to imitate the way many other C compilers handle
++alignment of bit-fields and the structures that contain them.
++
++The behavior is that the type written for a bit-field (int,
++short, or other integer type) imposes an alignment for the
++entire structure, as if the structure really did contain an ordinary
++field of that type. In addition, the bit-field is placed within the
++structure so that it would fit within such a field, not crossing a
++boundary for it.
++
++Thus, on most machines, a bit-field whose type is written as int
++would not cross a four-byte boundary, and would force four-byte
++alignment for the whole structure. (The alignment used may not be four
++bytes; it is controlled by the other alignment parameters.)
++
++If the macro is defined, its definition should be a C expression;
++a nonzero value for the expression enables this behavior.
++
++Note that if this macro is not defined, or its value is zero, some
++bit-fields may cross more than one alignment boundary. The compiler can
++support such references if there are insv, extv, and
++extzv insns that can directly reference memory.
++
++The other known way of making bit-fields work is to define
++STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
++Then every structure can be accessed with fullwords.
++
++Unless the machine has bit-field instructions or you define
++STRUCTURE_SIZE_BOUNDARY that way, you must define
++PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
++
++If your aim is to make GCC use the same conventions for laying out
++bit-fields as are used by another compiler, here is how to investigate
++what the other compiler does. Compile and run this program:
++
++struct foo1
++{
++ char x;
++ char :0;
++ char y;
++};
++
++struct foo2
++{
++ char x;
++ int :0;
++ char y;
++};
++
++main ()
++{
++ printf ("Size of foo1 is %d\n",
++ sizeof (struct foo1));
++ printf ("Size of foo2 is %d\n",
++ sizeof (struct foo2));
++ exit (0);
++}
++
++If this prints 2 and 5, then the compiler's behavior is what you would
++get from PCC_BITFIELD_TYPE_MATTERS.
++*/
++#define PCC_BITFIELD_TYPE_MATTERS 1
++
++
++/******************************************************************************
++ * Layout of Source Language Data Types
++ *****************************************************************************/
++
++/*
++A C expression for the size in bits of the type int on the
++target machine. If you don't define this, the default is one word.
++*/
++#define INT_TYPE_SIZE 32
++
++/*
++A C expression for the size in bits of the type short on the
++target machine. If you don't define this, the default is half a word. (If
++this would be less than one storage unit, it is rounded up to one unit.)
++*/
++#define SHORT_TYPE_SIZE 16
++
++/*
++A C expression for the size in bits of the type long on the
++target machine. If you don't define this, the default is one word.
++*/
++#define LONG_TYPE_SIZE 32
++
++
++/*
++A C expression for the size in bits of the type long long on the
++target machine. If you don't define this, the default is two
++words. If you want to support GNU Ada on your machine, the value of this
++macro must be at least 64.
++*/
++#define LONG_LONG_TYPE_SIZE 64
++
++/*
++A C expression for the size in bits of the type char on the
++target machine. If you don't define this, the default is
++BITS_PER_UNIT.
++*/
++#define CHAR_TYPE_SIZE 8
++
++
++/*
++A C expression for the size in bits of the C++ type bool and
++C99 type _Bool on the target machine. If you don't define
++this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
++*/
++#define BOOL_TYPE_SIZE 8
++
++
++/*
++An expression whose value is 1 or 0, according to whether the type
++char should be signed or unsigned by default. The user can
++always override this default with the options -fsigned-char
++and -funsigned-char.
++*/
++/* We are using unsigned char */
++#define DEFAULT_SIGNED_CHAR 0
++
++
++/*
++A C expression for a string describing the name of the data type to use
++for size values. The typedef name size_t is defined using the
++contents of the string.
++
++The string can contain more than one keyword. If so, separate them with
++spaces, and write first any length keyword, then unsigned if
++appropriate, and finally int. The string must exactly match one
++of the data type names defined in the function
++init_decl_processing in the file c-decl.c. You may not
++omit int or change the order - that would cause the compiler to
++crash on startup.
++
++If you don't define this macro, the default is "long unsigned int".
++*/
++#define SIZE_TYPE "long unsigned int"
++
++/*
++A C expression for a string describing the name of the data type to use
++for the result of subtracting two pointers. The typedef name
++ptrdiff_t is defined using the contents of the string. See
++SIZE_TYPE above for more information.
++
++If you don't define this macro, the default is "long int".
++*/
++#define PTRDIFF_TYPE "long int"
++
++
++/*
++A C expression for the size in bits of the data type for wide
++characters. This is used in cpp, which cannot make use of
++WCHAR_TYPE.
++*/
++#define WCHAR_TYPE_SIZE 32
++
++
++/*
++A C expression for a string describing the name of the data type to
++use for wide characters passed to printf and returned from
++getwc. The typedef name wint_t is defined using the
++contents of the string. See SIZE_TYPE above for more
++information.
++
++If you don't define this macro, the default is "unsigned int".
++*/
++#define WINT_TYPE "unsigned int"
++
++/*
++A C expression for a string describing the name of the data type that
++can represent any value of any standard or extended signed integer type.
++The typedef name intmax_t is defined using the contents of the
++string. See SIZE_TYPE above for more information.
++
++If you don't define this macro, the default is the first of
++"int", "long int", or "long long int" that has as
++much precision as long long int.
++*/
++#define INTMAX_TYPE "long long int"
++
++/*
++A C expression for a string describing the name of the data type that
++can represent any value of any standard or extended unsigned integer
++type. The typedef name uintmax_t is defined using the contents
++of the string. See SIZE_TYPE above for more information.
++
++If you don't define this macro, the default is the first of
++"unsigned int", "long unsigned int", or "long long unsigned int"
++that has as much precision as long long unsigned int.
++*/
++#define UINTMAX_TYPE "long long unsigned int"
++
++
++/******************************************************************************
++ * Register Usage
++ *****************************************************************************/
++
++/* Convert from gcc internal register number to register number
++ used in assembly code */
++#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
++
++/* Convert between register number used in assembly to gcc
++ internal register number */
++#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
++
++/** Basic Characteristics of Registers **/
++
++/*
++Number of hardware registers known to the compiler. They receive
++numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
++pseudo register's number really is assigned the number
++FIRST_PSEUDO_REGISTER.
++*/
++#define FIRST_PSEUDO_REGISTER (LAST_REGNUM + 1)
++
++#define FIRST_REGNUM 0
++#define LAST_REGNUM 15
++
++/*
++An initializer that says which registers are used for fixed purposes
++all throughout the compiled code and are therefore not available for
++general allocation. These would include the stack pointer, the frame
++pointer (except on machines where that can be used as a general
++register when no frame pointer is needed), the program counter on
++machines where that is considered one of the addressable registers,
++and any other numbered register with a standard use.
++
++This information is expressed as a sequence of numbers, separated by
++commas and surrounded by braces. The nth number is 1 if
++register n is fixed, 0 otherwise.
++
++The table initialized from this macro, and the table initialized by
++the following one, may be overridden at run time either automatically,
++by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
++the user with the command options -ffixed-[reg],
++-fcall-used-[reg] and -fcall-saved-[reg].
++*/
++
++/* The internal gcc register numbers are reversed
++ compared to the real register numbers since
++ gcc expects data types stored over multiple
++ registers in the register file to be big endian
++ if the memory layout is big endian. But this
++ is not the case for avr32 so we fake a big
++ endian register file. */
++
++#define FIXED_REGISTERS { \
++ 1, /* Program Counter */ \
++ 0, /* Link Register */ \
++ 1, /* Stack Pointer */ \
++ 0, /* r12 */ \
++ 0, /* r11 */ \
++ 0, /* r10 */ \
++ 0, /* r9 */ \
++ 0, /* r8 */ \
++ 0, /* r7 */ \
++ 0, /* r6 */ \
++ 0, /* r5 */ \
++ 0, /* r4 */ \
++ 0, /* r3 */ \
++ 0, /* r2 */ \
++ 0, /* r1 */ \
++ 0, /* r0 */ \
++}
++
++/*
++Like FIXED_REGISTERS but has 1 for each register that is
++clobbered (in general) by function calls as well as for fixed
++registers. This macro therefore identifies the registers that are not
++available for general allocation of values that must live across
++function calls.
++
++If a register has 0 in CALL_USED_REGISTERS, the compiler
++automatically saves it on function entry and restores it on function
++exit, if the register is used within the function.
++*/
++#define CALL_USED_REGISTERS { \
++ 1, /* Program Counter */ \
++ 0, /* Link Register */ \
++ 1, /* Stack Pointer */ \
++ 1, /* r12 */ \
++ 1, /* r11 */ \
++ 1, /* r10 */ \
++ 1, /* r9 */ \
++ 1, /* r8 */ \
++ 0, /* r7 */ \
++ 0, /* r6 */ \
++ 0, /* r5 */ \
++ 0, /* r4 */ \
++ 0, /* r3 */ \
++ 0, /* r2 */ \
++ 0, /* r1 */ \
++ 0, /* r0 */ \
++}
++
++/* Interrupt functions can only use registers that have already been
++ saved by the prologue, even if they would normally be
++ call-clobbered. */
++#define HARD_REGNO_RENAME_OK(SRC, DST) \
++ (! IS_INTERRUPT (cfun->machine->func_type) || \
++ df_regs_ever_live_p (DST))
++
++
++/*
++Zero or more C statements that may conditionally modify five variables
++fixed_regs, call_used_regs, global_regs,
++reg_names, and reg_class_contents, to take into account
++any dependence of these register sets on target flags. The first three
++of these are of type char [] (interpreted as Boolean vectors).
++global_regs is a const char *[], and
++reg_class_contents is a HARD_REG_SET. Before the macro is
++called, fixed_regs, call_used_regs,
++reg_class_contents, and reg_names have been initialized
++from FIXED_REGISTERS, CALL_USED_REGISTERS,
++REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
++global_regs has been cleared, and any -ffixed-[reg],
++-fcall-used-[reg] and -fcall-saved-[reg]
++command options have been applied.
++
++You need not define this macro if it has no work to do.
++
++If the usage of an entire class of registers depends on the target
++flags, you may indicate this to GCC by using this macro to modify
++fixed_regs and call_used_regs to 1 for each of the
++registers in the classes which should not be used by GCC. Also define
++the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
++is called with a letter for a class that shouldn't be used.
++
++ (However, if this class is not included in GENERAL_REGS and all
++of the insn patterns whose constraints permit this class are
++controlled by target switches, then GCC will automatically avoid using
++these registers when the target switches are opposed to them.)
++*/
++#define CONDITIONAL_REGISTER_USAGE \
++ do \
++ { \
++ if (flag_pic) \
++ { \
++ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
++ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
++ } \
++ } \
++ while (0)
++
++
++/*
++If the program counter has a register number, define this as that
++register number. Otherwise, do not define it.
++*/
++
++#define LAST_AVR32_REGNUM 16
++
++
++/** Order of Allocation of Registers **/
++
++/*
++If defined, an initializer for a vector of integers, containing the
++numbers of hard registers in the order in which GCC should prefer
++to use them (from most preferred to least).
++
++If this macro is not defined, registers are used lowest numbered first
++(all else being equal).
++
++One use of this macro is on machines where the highest numbered
++registers must always be saved and the save-multiple-registers
++instruction supports only sequences of consecutive registers. On such
++machines, define REG_ALLOC_ORDER to be an initializer that lists
++the highest numbered allocable register first.
++*/
++#define REG_ALLOC_ORDER \
++{ \
++ INTERNAL_REGNUM(8), \
++ INTERNAL_REGNUM(9), \
++ INTERNAL_REGNUM(10), \
++ INTERNAL_REGNUM(11), \
++ INTERNAL_REGNUM(12), \
++ LR_REGNUM, \
++ INTERNAL_REGNUM(7), \
++ INTERNAL_REGNUM(6), \
++ INTERNAL_REGNUM(5), \
++ INTERNAL_REGNUM(4), \
++ INTERNAL_REGNUM(3), \
++ INTERNAL_REGNUM(2), \
++ INTERNAL_REGNUM(1), \
++ INTERNAL_REGNUM(0), \
++ SP_REGNUM, \
++ PC_REGNUM \
++}
++
++
++/** How Values Fit in Registers **/
++
++/*
++A C expression for the number of consecutive hard registers, starting
++at register number REGNO, required to hold a value of mode
++MODE.
++
++On a machine where all registers are exactly one word, a suitable
++definition of this macro is
++
++#define HARD_REGNO_NREGS(REGNO, MODE) \
++ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
++ / UNITS_PER_WORD)
++*/
++#define HARD_REGNO_NREGS(REGNO, MODE) \
++ ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD))
++
++/*
++A C expression that is nonzero if it is permissible to store a value
++of mode MODE in hard register number REGNO (or in several
++registers starting with that one). For a machine where all registers
++are equivalent, a suitable definition is
++
++ #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
++
++You need not include code to check for the numbers of fixed registers,
++because the allocation mechanism considers them to be always occupied.
++
++On some machines, double-precision values must be kept in even/odd
++register pairs. You can implement that by defining this macro to reject
++odd register numbers for such modes.
++
++The minimum requirement for a mode to be OK in a register is that the
++mov[mode] instruction pattern support moves between the
++register and other hard register in the same class and that moving a
++value into the register and back out not alter it.
++
++Since the same instruction used to move word_mode will work for
++all narrower integer modes, it is not necessary on any machine for
++HARD_REGNO_MODE_OK to distinguish between these modes, provided
++you define patterns movhi, etc., to take advantage of this. This
++is useful because of the interaction between HARD_REGNO_MODE_OK
++and MODES_TIEABLE_P; it is very desirable for all integer modes
++to be tieable.
++
++Many machines have special registers for floating point arithmetic.
++Often people assume that floating point machine modes are allowed only
++in floating point registers. This is not true. Any registers that
++can hold integers can safely hold a floating point machine
++mode, whether or not floating arithmetic can be done on it in those
++registers. Integer move instructions can be used to move the values.
++
++On some machines, though, the converse is true: fixed-point machine
++modes may not go in floating registers. This is true if the floating
++registers normalize any value stored in them, because storing a
++non-floating value there would garble it. In this case,
++HARD_REGNO_MODE_OK should reject fixed-point machine modes in
++floating registers. But if the floating registers do not automatically
++normalize, if you can store any bit pattern in one and retrieve it
++unchanged without a trap, then any machine mode may go in a floating
++register, so you can define this macro to say so.
++
++The primary significance of special floating registers is rather that
++they are the registers acceptable in floating point arithmetic
++instructions. However, this is of no concern to
++HARD_REGNO_MODE_OK. You handle it by writing the proper
++constraints for those instructions.
++
++On some machines, the floating registers are especially slow to access,
++so that it is better to store a value in a stack frame than in such a
++register if floating point arithmetic is not being done. As long as the
++floating registers are not in class GENERAL_REGS, they will not
++be used unless some pattern's constraint asks for one.
++*/
++#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
++
++/*
++A C expression that is nonzero if a value of mode
++MODE1 is accessible in mode MODE2 without copying.
++
++If HARD_REGNO_MODE_OK(R, MODE1) and
++HARD_REGNO_MODE_OK(R, MODE2) are always the same for
++any R, then MODES_TIEABLE_P(MODE1, MODE2)
++should be nonzero. If they differ for any R, you should define
++this macro to return zero unless some other mechanism ensures the
++accessibility of the value in a narrower mode.
++
++You should define this macro to return nonzero in as many cases as
++possible since doing so will allow GCC to perform better register
++allocation.
++*/
++#define MODES_TIEABLE_P(MODE1, MODE2) \
++ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
++
++
++
++/******************************************************************************
++ * Register Classes
++ *****************************************************************************/
++
++/*
++An enumeral type that must be defined with all the register class names
++as enumeral values. NO_REGS must be first. ALL_REGS
++must be the last register class, followed by one more enumeral value,
++LIM_REG_CLASSES, which is not a register class but rather
++tells how many classes there are.
++
++Each register class has a number, which is the value of casting
++the class name to type int. The number serves as an index
++in many of the tables described below.
++*/
++enum reg_class
++{
++ NO_REGS,
++ GENERAL_REGS,
++ ALL_REGS,
++ LIM_REG_CLASSES
++};
++
++/*
++The number of distinct register classes, defined as follows:
++ #define N_REG_CLASSES (int) LIM_REG_CLASSES
++*/
++#define N_REG_CLASSES (int)LIM_REG_CLASSES
++
++/*
++An initializer containing the names of the register classes as C string
++constants. These names are used in writing some of the debugging dumps.
++*/
++#define REG_CLASS_NAMES \
++{ \
++ "NO_REGS", \
++ "GENERAL_REGS", \
++ "ALL_REGS" \
++}
++
++/*
++An initializer containing the contents of the register classes, as integers
++which are bit masks. The nth integer specifies the contents of class
++n. The way the integer mask is interpreted is that
++register r is in the class if mask & (1 << r) is 1.
++
++When the machine has more than 32 registers, an integer does not suffice.
++Then the integers are replaced by sub-initializers, braced groupings containing
++several integers. Each sub-initializer must be suitable as an initializer
++for the type HARD_REG_SET which is defined in hard-reg-set.h.
++In this situation, the first integer in each sub-initializer corresponds to
++registers 0 through 31, the second integer to registers 32 through 63, and
++so on.
++*/
++#define REG_CLASS_CONTENTS { \
++ {0x00000000}, /* NO_REGS */ \
++ {0x0000FFFF}, /* GENERAL_REGS */ \
++ {0x7FFFFFFF}, /* ALL_REGS */ \
++}
++
++
++/*
++A C expression whose value is a register class containing hard register
++REGNO. In general there is more than one such class; choose a class
++which is minimal, meaning that no smaller class also contains the
++register.
++*/
++#define REGNO_REG_CLASS(REGNO) (GENERAL_REGS)
++
++/*
++A macro whose definition is the name of the class to which a valid
++base register must belong. A base register is one used in an address
++which is the register value plus a displacement.
++*/
++#define BASE_REG_CLASS GENERAL_REGS
++
++/*
++This is a variation of the BASE_REG_CLASS macro which allows
++the selection of a base register in a mode depenedent manner. If
++mode is VOIDmode then it should return the same value as
++BASE_REG_CLASS.
++*/
++#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
++
++/*
++A macro whose definition is the name of the class to which a valid
++index register must belong. An index register is one used in an
++address where its value is either multiplied by a scale factor or
++added to another register (as well as added to a displacement).
++*/
++#define INDEX_REG_CLASS BASE_REG_CLASS
++
++/*
++A C expression which defines the machine-dependent operand constraint
++letters for register classes. If CHAR is such a letter, the
++value should be the register class corresponding to it. Otherwise,
++the value should be NO_REGS. The register letter r,
++corresponding to class GENERAL_REGS, will not be passed
++to this macro; you do not need to handle it.
++*/
++#define REG_CLASS_FROM_LETTER(CHAR) NO_REGS
++
++/* These assume that REGNO is a hard or pseudo reg number.
++ They give nonzero only if REGNO is a hard reg of the suitable class
++ or a pseudo reg currently allocated to a suitable hard reg.
++ Since they use reg_renumber, they are safe only once reg_renumber
++ has been allocated, which happens in local-alloc.c. */
++#define TEST_REGNO(R, TEST, VALUE) \
++ ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
++
++/*
++A C expression which is nonzero if register number num is suitable for use as a base
++register in operand addresses. It may be either a suitable hard register or a pseudo
++register that has been allocated such a hard register.
++*/
++#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
++
++/* The following macro defines cover classes for Integrated Register
++ Allocator. Cover classes is a set of non-intersected register
++ classes covering all hard registers used for register allocation
++ purpose. Any move between two registers of a cover class should be
++ cheaper than load or store of the registers. The macro value is
++ array of register classes with LIM_REG_CLASSES used as the end
++ marker. */
++
++#define IRA_COVER_CLASSES \
++{ \
++ GENERAL_REGS, LIM_REG_CLASSES \
++}
++
++/*
++A C expression which is nonzero if register number NUM is
++suitable for use as an index register in operand addresses. It may be
++either a suitable hard register or a pseudo register that has been
++allocated such a hard register.
++
++The difference between an index register and a base register is that
++the index register may be scaled. If an address involves the sum of
++two registers, neither one of them scaled, then either one may be
++labeled the ``base'' and the other the ``index''; but whichever
++labeling is used must fit the machine's constraints of which registers
++may serve in each capacity. The compiler will try both labelings,
++looking for one that is valid, and will reload one or both registers
++only if neither labeling works.
++*/
++#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
++
++/*
++A C expression that places additional restrictions on the register class
++to use when it is necessary to copy value X into a register in class
++CLASS. The value is a register class; perhaps CLASS, or perhaps
++another, smaller class. On many machines, the following definition is
++safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
++
++Sometimes returning a more restrictive class makes better code. For
++example, on the 68000, when X is an integer constant that is in range
++for a 'moveq' instruction, the value of this macro is always
++DATA_REGS as long as CLASS includes the data registers.
++Requiring a data register guarantees that a 'moveq' will be used.
++
++If X is a const_double, by returning NO_REGS
++you can force X into a memory constant. This is useful on
++certain machines where immediate floating values cannot be loaded into
++certain kinds of registers.
++*/
++#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS
++
++
++
++/*
++A C expression for the maximum number of consecutive registers
++of class CLASS needed to hold a value of mode MODE.
++
++This is closely related to the macro HARD_REGNO_NREGS. In fact,
++the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
++should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
++for all REGNO values in the class CLASS.
++
++This macro helps control the handling of multiple-word values
++in the reload pass.
++*/
++#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
++ (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
++
++
++/*
++ Using CONST_OK_FOR_CONSTRAINT_P instead of CONS_OK_FOR_LETTER_P
++ in order to support constraints with more than one letter.
++ Only two letters are then used for constant constraints,
++ the letter 'K' and the letter 'I'. The constraint starting with
++ these letters must consist of four characters. The character following
++ 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
++ if the constant is zero or sign extended. The last two characters specify
++ the length in bits of the constant. The base constraint letter 'I' means
++ that this is an negated constant, meaning that actually -VAL should be
++ checked to lie withing the valid range instead of VAL which is used when
++ 'K' is the base constraint letter.
++
++*/
++
++#define CONSTRAINT_LEN(C, STR) \
++ ( ((C) == 'K' || (C) == 'I') ? 4 : \
++ ((C) == 'R') ? 5 : \
++ ((C) == 'P') ? -1 : \
++ DEFAULT_CONSTRAINT_LEN((C), (STR)) )
++
++#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
++ avr32_const_ok_for_constraint_p(VALUE, C, STR)
++
++/*
++A C expression that defines the machine-dependent operand constraint
++letters that specify particular ranges of const_double values ('G' or 'H').
++
++If C is one of those letters, the expression should check that
++VALUE, an RTX of code const_double, is in the appropriate
++range and return 1 if so, 0 otherwise. If C is not one of those
++letters, the value should be 0 regardless of VALUE.
++
++const_double is used for all floating-point constants and for
++DImode fixed-point constants. A given letter can accept either
++or both kinds of values. It can use GET_MODE to distinguish
++between these kinds.
++*/
++#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
++ ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
++
++/*
++A C expression that defines the optional machine-dependent constraint
++letters that can be used to segregate specific types of operands, usually
++memory references, for the target machine. Any letter that is not
++elsewhere defined and not matched by REG_CLASS_FROM_LETTER
++may be used. Normally this macro will not be defined.
++
++If it is required for a particular target machine, it should return 1
++if VALUE corresponds to the operand type represented by the
++constraint letter C. If C is not defined as an extra
++constraint, the value returned should be 0 regardless of VALUE.
++
++For example, on the ROMP, load instructions cannot have their output
++in r0 if the memory reference contains a symbolic address. Constraint
++letter 'Q' is defined as representing a memory address that does
++not contain a symbolic address. An alternative is specified with
++a 'Q' constraint on the input and 'r' on the output. The next
++alternative specifies 'm' on the input and a register class that
++does not include r0 on the output.
++*/
++#define EXTRA_CONSTRAINT_STR(OP, C, STR) \
++ ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \
++ (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \
++ (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \
++ && avr32_const_ok_for_constraint_p( \
++ INTVAL(XEXP(XEXP(OP, 0), 1)), \
++ (STR)[1], &(STR)[1]))) : \
++ (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \
++ (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
++ (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
++ (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
++ (C) == 'Q' ? avr32_non_rmw_memory_operand(OP, GET_MODE(OP)) : \
++ (C) == 'Y' ? avr32_rmw_memory_operand(OP, GET_MODE(OP)) : \
++ 0)
++
++
++#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
++ ((C) == 'Q') || \
++ ((C) == 'S') || \
++ ((C) == 'Y') || \
++ ((C) == 'Z') )
++
++
++/* Returns nonzero if op is a function SYMBOL_REF which
++ can be called using an rcall instruction */
++#define SYMBOL_REF_RCALL_FUNCTION_P(op) \
++ ( GET_CODE(op) == SYMBOL_REF \
++ && SYMBOL_REF_FUNCTION_P(op) \
++ && SYMBOL_REF_LOCAL_P(op) \
++ && !SYMBOL_REF_EXTERNAL_P(op) \
++ && !TARGET_HAS_ASM_ADDR_PSEUDOS )
++
++/******************************************************************************
++ * Stack Layout and Calling Conventions
++ *****************************************************************************/
++
++/** Basic Stack Layout **/
++
++/*
++Define this macro if pushing a word onto the stack moves the stack
++pointer to a smaller address.
++
++When we say, ``define this macro if ...,'' it means that the
++compiler checks this macro only with #ifdef so the precise
++definition used does not matter.
++*/
++/* pushm decrece SP: *(--SP) <-- Rx */
++#define STACK_GROWS_DOWNWARD
++
++/*
++This macro defines the operation used when something is pushed
++on the stack. In RTL, a push operation will be
++(set (mem (STACK_PUSH_CODE (reg sp))) ...)
++
++The choices are PRE_DEC, POST_DEC, PRE_INC,
++and POST_INC. Which of these is correct depends on
++the stack direction and on whether the stack pointer points
++to the last item on the stack or whether it points to the
++space for the next item on the stack.
++
++The default is PRE_DEC when STACK_GROWS_DOWNWARD is
++defined, which is almost always right, and PRE_INC otherwise,
++which is often wrong.
++*/
++/* pushm: *(--SP) <-- Rx */
++#define STACK_PUSH_CODE PRE_DEC
++
++/* Define this to nonzero if the nominal address of the stack frame
++ is at the high-address end of the local variables;
++ that is, each additional local variable allocated
++ goes at a more negative offset in the frame. */
++#define FRAME_GROWS_DOWNWARD 1
++
++
++/*
++Offset from the frame pointer to the first local variable slot to be allocated.
++
++If FRAME_GROWS_DOWNWARD, find the next slot's offset by
++subtracting the first slot's length from STARTING_FRAME_OFFSET.
++Otherwise, it is found by adding the length of the first slot to the
++value STARTING_FRAME_OFFSET.
++ (i'm not sure if the above is still correct.. had to change it to get
++ rid of an overfull. --mew 2feb93 )
++*/
++#define STARTING_FRAME_OFFSET 0
++
++/*
++Offset from the stack pointer register to the first location at which
++outgoing arguments are placed. If not specified, the default value of
++zero is used. This is the proper value for most machines.
++
++If ARGS_GROW_DOWNWARD, this is the offset to the location above
++the first location at which outgoing arguments are placed.
++*/
++#define STACK_POINTER_OFFSET 0
++
++/*
++Offset from the argument pointer register to the first argument's
++address. On some machines it may depend on the data type of the
++function.
++
++If ARGS_GROW_DOWNWARD, this is the offset to the location above
++the first argument's address.
++*/
++#define FIRST_PARM_OFFSET(FUNDECL) 0
++
++
++/*
++A C expression whose value is RTL representing the address in a stack
++frame where the pointer to the caller's frame is stored. Assume that
++FRAMEADDR is an RTL expression for the address of the stack frame
++itself.
++
++If you don't define this macro, the default is to return the value
++of FRAMEADDR - that is, the stack frame address is also the
++address of the stack word that points to the previous frame.
++*/
++#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
++
++
++/*
++A C expression whose value is RTL representing the value of the return
++address for the frame COUNT steps up from the current frame, after
++the prologue. FRAMEADDR is the frame pointer of the COUNT
++frame, or the frame pointer of the COUNT - 1 frame if
++RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
++
++The value of the expression must always be the correct address when
++COUNT is zero, but may be NULL_RTX if there is not way to
++determine the return address of other frames.
++*/
++#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
++
++
++/*
++A C expression whose value is RTL representing the location of the
++incoming return address at the beginning of any function, before the
++prologue. This RTL is either a REG, indicating that the return
++value is saved in 'REG', or a MEM representing a location in
++the stack.
++
++You only need to define this macro if you want to support call frame
++debugging information like that provided by DWARF 2.
++
++If this RTL is a REG, you should also define
++DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
++*/
++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
++
++/*
++A C expression whose value is an integer giving the offset, in bytes,
++from the value of the stack pointer register to the top of the stack
++frame at the beginning of any function, before the prologue. The top of
++the frame is defined to be the value of the stack pointer in the
++previous frame, just before the call instruction.
++
++You only need to define this macro if you want to support call frame
++debugging information like that provided by DWARF 2.
++*/
++#define INCOMING_FRAME_SP_OFFSET 0
++
++
++/** Exception Handling Support **/
++
++/* Use setjump/longjump for exception handling. */
++#define DWARF2_UNWIND_INFO 0
++#define MUST_USE_SJLJ_EXCEPTIONS 1
++
++/*
++A C expression whose value is the Nth register number used for
++data by exception handlers, or INVALID_REGNUM if fewer than
++N registers are usable.
++
++The exception handling library routines communicate with the exception
++handlers via a set of agreed upon registers. Ideally these registers
++should be call-clobbered; it is possible to use call-saved registers,
++but may negatively impact code size. The target must support at least
++2 data registers, but should define 4 if there are enough free registers.
++
++You must define this macro if you want to support call frame exception
++handling like that provided by DWARF 2.
++*/
++/*
++ Use r9-r11
++*/
++#define EH_RETURN_DATA_REGNO(N) \
++ ((N<3) ? INTERNAL_REGNUM(N+9) : INVALID_REGNUM)
++
++/*
++A C expression whose value is RTL representing a location in which
++to store a stack adjustment to be applied before function return.
++This is used to unwind the stack to an exception handler's call frame.
++It will be assigned zero on code paths that return normally.
++
++Typically this is a call-clobbered hard register that is otherwise
++untouched by the epilogue, but could also be a stack slot.
++
++You must define this macro if you want to support call frame exception
++handling like that provided by DWARF 2.
++*/
++/*
++ Use r8
++*/
++#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8)
++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
++
++/*
++A C expression whose value is RTL representing a location in which
++to store the address of an exception handler to which we should
++return. It will not be assigned on code paths that return normally.
++
++Typically this is the location in the call frame at which the normal
++return address is stored. For targets that return by popping an
++address off the stack, this might be a memory address just below
++the target call frame rather than inside the current call
++frame. EH_RETURN_STACKADJ_RTX will have already been assigned,
++so it may be used to calculate the location of the target call frame.
++
++Some targets have more complex requirements than storing to an
++address calculable during initial code generation. In that case
++the eh_return instruction pattern should be used instead.
++
++If you want to support call frame exception handling, you must
++define either this macro or the eh_return instruction pattern.
++*/
++/*
++ We define the eh_return instruction pattern, so this isn't needed.
++*/
++/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
++
++/*
++ This macro chooses the encoding of pointers embedded in the
++ exception handling sections. If at all possible, this should be
++ defined such that the exception handling section will not require
++ dynamic relocations, and so may be read-only.
++
++ code is 0 for data, 1 for code labels, 2 for function
++ pointers. global is true if the symbol may be affected by dynamic
++ relocations. The macro should return a combination of the DW_EH_PE_*
++ defines as found in dwarf2.h.
++
++ If this macro is not defined, pointers will not be encoded but
++ represented directly.
++*/
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
++ ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
++ | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
++ | DW_EH_PE_sdata4)
++
++/* ToDo: The rest of this subsection */
++
++/** Specifying How Stack Checking is Done **/
++/* ToDo: All in this subsection */
++
++/** Registers That Address the Stack Frame **/
++
++/*
++The register number of the stack pointer register, which must also be a
++fixed register according to FIXED_REGISTERS. On most machines,
++the hardware determines which register this is.
++*/
++/* Using r13 as stack pointer. */
++#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
++
++/*
++The register number of the frame pointer register, which is used to
++access automatic variables in the stack frame. On some machines, the
++hardware determines which register this is. On other machines, you can
++choose any register you wish for this purpose.
++*/
++/* Use r7 */
++#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
++
++/*
++The register number of the arg pointer register, which is used to access
++the function's argument list. On some machines, this is the same as the
++frame pointer register. On some machines, the hardware determines which
++register this is. On other machines, you can choose any register you
++wish for this purpose. If this is not the same register as the frame
++pointer register, then you must mark it as a fixed register according to
++FIXED_REGISTERS, or arrange to be able to eliminate it (see Section
++10.10.5 [Elimination], page 224).
++*/
++/* Using r5 */
++#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
++
++
++/*
++Register numbers used for passing a function's static chain pointer. If
++register windows are used, the register number as seen by the called
++function is STATIC_CHAIN_INCOMING_REGNUM, while the register
++number as seen by the calling function is STATIC_CHAIN_REGNUM. If
++these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
++not be defined.
++
++The static chain register need not be a fixed register.
++
++If the static chain is passed in memory, these macros should not be
++defined; instead, the next two macros should be defined.
++*/
++/* Using r0 */
++#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
++
++/** Eliminating Frame Pointer and Arg Pointer **/
++
++/*
++A C expression which is nonzero if a function must have and use a frame
++pointer. This expression is evaluated in the reload pass. If its value is
++nonzero the function will have a frame pointer.
++
++The expression can in principle examine the current function and decide
++according to the facts, but on most machines the constant 0 or the
++constant 1 suffices. Use 0 when the machine allows code to be generated
++with no frame pointer, and doing so saves some time or space. Use 1
++when there is no possible advantage to avoiding a frame pointer.
++
++In certain cases, the compiler does not know how to produce valid code
++without a frame pointer. The compiler recognizes those cases and
++automatically gives the function a frame pointer regardless of what
++FRAME_POINTER_REQUIRED says. You don't need to worry about
++them.
++
++In a function that does not require a frame pointer, the frame pointer
++register can be allocated for ordinary usage, unless you mark it as a
++fixed register. See FIXED_REGISTERS for more information.
++*/
++/* We need the frame pointer when compiling for profiling */
++#define FRAME_POINTER_REQUIRED (crtl->profile)
++
++/*
++A C statement to store in the variable DEPTH_VAR the difference
++between the frame pointer and the stack pointer values immediately after
++the function prologue. The value would be computed from information
++such as the result of get_frame_size () and the tables of
++registers regs_ever_live and call_used_regs.
++
++If ELIMINABLE_REGS is defined, this macro will be not be used and
++need not be defined. Otherwise, it must be defined even if
++FRAME_POINTER_REQUIRED is defined to always be true; in that
++case, you may set DEPTH_VAR to anything.
++*/
++#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
++
++/*
++If defined, this macro specifies a table of register pairs used to
++eliminate unneeded registers that point into the stack frame. If it is not
++defined, the only elimination attempted by the compiler is to replace
++references to the frame pointer with references to the stack pointer.
++
++The definition of this macro is a list of structure initializations, each
++of which specifies an original and replacement register.
++
++On some machines, the position of the argument pointer is not known until
++the compilation is completed. In such a case, a separate hard register
++must be used for the argument pointer. This register can be eliminated by
++replacing it with either the frame pointer or the argument pointer,
++depending on whether or not the frame pointer has been eliminated.
++
++In this case, you might specify:
++ #define ELIMINABLE_REGS \
++ {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
++ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
++
++Note that the elimination of the argument pointer with the stack pointer is
++specified first since that is the preferred elimination.
++*/
++#define ELIMINABLE_REGS \
++{ \
++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
++ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
++ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \
++}
++
++/*
++A C expression that returns nonzero if the compiler is allowed to try
++to replace register number FROM with register number
++TO. This macro need only be defined if ELIMINABLE_REGS
++is defined, and will usually be the constant 1, since most of the cases
++preventing register elimination are things that the compiler already
++knows about.
++*/
++#define CAN_ELIMINATE(FROM, TO) 1
++
++/*
++This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It
++specifies the initial difference between the specified pair of
++registers. This macro must be defined if ELIMINABLE_REGS is
++defined.
++*/
++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
++ ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
++
++/** Passing Function Arguments on the Stack **/
++
++
++/*
++A C expression. If nonzero, push insns will be used to pass
++outgoing arguments.
++If the target machine does not have a push instruction, set it to zero.
++That directs GCC to use an alternate strategy: to
++allocate the entire argument block and then store the arguments into
++it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
++*/
++#define PUSH_ARGS 1
++
++/*
++A C expression that is the number of bytes actually pushed onto the
++stack when an instruction attempts to push NPUSHED bytes.
++
++On some machines, the definition
++
++ #define PUSH_ROUNDING(BYTES) (BYTES)
++
++will suffice. But on other machines, instructions that appear
++to push one byte actually push two bytes in an attempt to maintain
++alignment. Then the definition should be
++
++ #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
++*/
++/* Push 4 bytes at the time. */
++#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
++
++/*
++A C expression. If nonzero, the maximum amount of space required for
++outgoing arguments will be computed and placed into the variable
++current_function_outgoing_args_size. No space will be pushed
++onto the stack for each call; instead, the function prologue should
++increase the stack frame size by this amount.
++
++Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
++*/
++#define ACCUMULATE_OUTGOING_ARGS 0
++
++/*
++A C expression that should indicate the number of bytes of its own
++arguments that a function pops on returning, or 0 if the
++function pops no arguments and the caller must therefore pop them all
++after the function returns.
++
++FUNDECL is a C variable whose value is a tree node that describes
++the function in question. Normally it is a node of type
++FUNCTION_DECL that describes the declaration of the function.
++From this you can obtain the DECL_ATTRIBUTES of the function.
++
++FUNTYPE is a C variable whose value is a tree node that
++describes the function in question. Normally it is a node of type
++FUNCTION_TYPE that describes the data type of the function.
++From this it is possible to obtain the data types of the value and
++arguments (if known).
++
++When a call to a library function is being considered, FUNDECL
++will contain an identifier node for the library function. Thus, if
++you need to distinguish among various library functions, you can do so
++by their names. Note that ``library function'' in this context means
++a function used to perform arithmetic, whose name is known specially
++in the compiler and was not mentioned in the C code being compiled.
++
++STACK_SIZE is the number of bytes of arguments passed on the
++stack. If a variable number of bytes is passed, it is zero, and
++argument popping will always be the responsibility of the calling function.
++
++On the VAX, all functions always pop their arguments, so the definition
++of this macro is STACK_SIZE. On the 68000, using the standard
++calling convention, no functions pop their arguments, so the value of
++the macro is always 0 in this case. But an alternative calling
++convention is available in which functions that take a fixed number of
++arguments pop them but other functions (such as printf) pop
++nothing (the caller pops all). When this convention is in use,
++FUNTYPE is examined to determine whether a function takes a fixed
++number of arguments.
++*/
++#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
++
++
++/* Return true if this function can use a single return instruction. */
++#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
++
++/*
++A C expression that should indicate the number of bytes a call sequence
++pops off the stack. It is added to the value of RETURN_POPS_ARGS
++when compiling a function call.
++
++CUM is the variable in which all arguments to the called function
++have been accumulated.
++
++On certain architectures, such as the SH5, a call trampoline is used
++that pops certain registers off the stack, depending on the arguments
++that have been passed to the function. Since this is a property of the
++call site, not of the called function, RETURN_POPS_ARGS is not
++appropriate.
++*/
++#define CALL_POPS_ARGS(CUM) 0
++
++/* Passing Arguments in Registers */
++
++/*
++A C expression that controls whether a function argument is passed
++in a register, and which register.
++
++The arguments are CUM, which summarizes all the previous
++arguments; MODE, the machine mode of the argument; TYPE,
++the data type of the argument as a tree node or 0 if that is not known
++(which happens for C support library functions); and NAMED,
++which is 1 for an ordinary argument and 0 for nameless arguments that
++correspond to '...' in the called function's prototype.
++TYPE can be an incomplete type if a syntax error has previously
++occurred.
++
++The value of the expression is usually either a reg RTX for the
++hard register in which to pass the argument, or zero to pass the
++argument on the stack.
++
++For machines like the VAX and 68000, where normally all arguments are
++pushed, zero suffices as a definition.
++
++The value of the expression can also be a parallel RTX. This is
++used when an argument is passed in multiple locations. The mode of the
++parallel should be the mode of the entire argument. The
++parallel holds any number of expr_list pairs; each one
++describes where part of the argument is passed. In each
++expr_list the first operand must be a reg RTX for the hard
++register in which to pass this part of the argument, and the mode of the
++register RTX indicates how large this part of the argument is. The
++second operand of the expr_list is a const_int which gives
++the offset in bytes into the entire argument of where this part starts.
++As a special exception the first expr_list in the parallel
++RTX may have a first operand of zero. This indicates that the entire
++argument is also stored on the stack.
++
++The last time this macro is called, it is called with MODE == VOIDmode,
++and its result is passed to the call or call_value
++pattern as operands 2 and 3 respectively.
++
++The usual way to make the ISO library 'stdarg.h' work on a machine
++where some arguments are usually passed in registers, is to cause
++nameless arguments to be passed on the stack instead. This is done
++by making FUNCTION_ARG return 0 whenever NAMED is 0.
++
++You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
++in the definition of this macro to determine if this argument is of a
++type that must be passed in the stack. If REG_PARM_STACK_SPACE
++is not defined and FUNCTION_ARG returns nonzero for such an
++argument, the compiler will abort. If REG_PARM_STACK_SPACE is
++defined, the argument will be computed in the stack and then loaded into
++a register. */
++
++#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
++ avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
++
++/*
++A C type for declaring a variable that is used as the first argument of
++FUNCTION_ARG and other related values. For some target machines,
++the type int suffices and can hold the number of bytes of
++argument so far.
++
++There is no need to record in CUMULATIVE_ARGS anything about the
++arguments that have been passed on the stack. The compiler has other
++variables to keep track of that. For target machines on which all
++arguments are passed on the stack, there is no need to store anything in
++CUMULATIVE_ARGS; however, the data structure must exist and
++should not be empty, so use int.
++*/
++typedef struct avr32_args
++{
++ /* Index representing the argument register the current function argument
++ will occupy */
++ int index;
++ /* A mask with bits representing the argument registers: if a bit is set
++ then this register is used for an argument */
++ int used_index;
++ /* TRUE if this function has anonymous arguments */
++ int uses_anonymous_args;
++ /* The size in bytes of the named arguments pushed on the stack */
++ int stack_pushed_args_size;
++ /* Set to true if this function needs a Return Value Pointer */
++ int use_rvp;
++ /* Set to true if function is a flashvault function. */
++ int flashvault_func;
++
++} CUMULATIVE_ARGS;
++
++
++#define FIRST_CUM_REG_INDEX 0
++#define LAST_CUM_REG_INDEX 4
++#define GET_REG_INDEX(CUM) ((CUM)->index)
++#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX));
++#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
++#define SET_USED_INDEX(CUM, INDEX) \
++ do \
++ { \
++ if (INDEX >= 0) \
++ (CUM)->used_index |= (1 << (INDEX)); \
++ } \
++ while (0)
++#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
++
++/*
++ A C statement (sans semicolon) for initializing the variable cum for the
++ state at the beginning of the argument list. The variable has type
++ CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
++ the function which will receive the args, or 0 if the args are to a compiler
++ support library function. For direct calls that are not libcalls, FNDECL
++ contain the declaration node of the function. FNDECL is also set when
++ INIT_CUMULATIVE_ARGS is used to find arguments for the function being
++ compiled. N_NAMED_ARGS is set to the number of named arguments, including a
++ structure return address if it is passed as a parameter, when making a call.
++ When processing incoming arguments, N_NAMED_ARGS is set to -1.
++
++ When processing a call to a compiler support library function, LIBNAME
++ identifies which one. It is a symbol_ref rtx which contains the name of the
++ function, as a string. LIBNAME is 0 when an ordinary C function call is
++ being processed. Thus, each time this macro is called, either LIBNAME or
++ FNTYPE is nonzero, but never both of them at once.
++*/
++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
++ avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
++
++/*
++A C statement (sans semicolon) to update the summarizer variable
++CUM to advance past an argument in the argument list. The
++values MODE, TYPE and NAMED describe that argument.
++Once this is done, the variable CUM is suitable for analyzing
++the following argument with FUNCTION_ARG, etc.
++
++This macro need not do anything if the argument in question was passed
++on the stack. The compiler knows how to track the amount of stack space
++used for arguments without any special help.
++*/
++#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
++ avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
++
++/*
++If defined, a C expression which determines whether, and in which direction,
++to pad out an argument with extra space. The value should be of type
++enum direction: either 'upward' to pad above the argument,
++'downward' to pad below, or 'none' to inhibit padding.
++
++The amount of padding is always just enough to reach the next
++multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
++it.
++
++This macro has a default definition which is right for most systems.
++For little-endian machines, the default is to pad upward. For
++big-endian machines, the default is to pad downward for an argument of
++constant size shorter than an int, and upward otherwise.
++*/
++#define FUNCTION_ARG_PADDING(MODE, TYPE) \
++ avr32_function_arg_padding(MODE, TYPE)
++
++/*
++ Specify padding for the last element of a block move between registers
++ and memory. First is nonzero if this is the only element. Defining
++ this macro allows better control of register function parameters on
++ big-endian machines, without using PARALLEL rtl. In particular,
++ MUST_PASS_IN_STACK need not test padding and mode of types in registers,
++ as there is no longer a "wrong" part of a register; For example, a three
++ byte aggregate may be passed in the high part of a register if so required.
++*/
++#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
++ avr32_function_arg_padding(MODE, TYPE)
++
++/*
++If defined, a C expression which determines whether the default
++implementation of va_arg will attempt to pad down before reading the
++next argument, if that argument is smaller than its aligned space as
++controlled by PARM_BOUNDARY. If this macro is not defined, all such
++arguments are padded down if BYTES_BIG_ENDIAN is true.
++*/
++#define PAD_VARARGS_DOWN \
++ (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
++
++/*
++A C expression that is nonzero if REGNO is the number of a hard
++register in which function arguments are sometimes passed. This does
++not include implicit arguments such as the static chain and
++the structure-value address. On many machines, no registers can be
++used for this purpose since all function arguments are pushed on the
++stack.
++*/
++/*
++ Use r8 - r12 for function arguments.
++*/
++#define FUNCTION_ARG_REGNO_P(REGNO) \
++ (REGNO >= 3 && REGNO <= 7)
++
++/* Number of registers used for passing function arguments */
++#define NUM_ARG_REGS 5
++
++/*
++If defined, the order in which arguments are loaded into their
++respective argument registers is reversed so that the last
++argument is loaded first. This macro only affects arguments
++passed in registers.
++*/
++/* #define LOAD_ARGS_REVERSED */
++
++/** How Scalar Function Values Are Returned **/
++
++/* AVR32 is using r12 as return register. */
++#define RET_REGISTER (15 - 12)
++
++/*
++A C expression to create an RTX representing the place where a library
++function returns a value of mode MODE. If the precise function
++being called is known, FUNC is a tree node
++(FUNCTION_DECL) for it; otherwise, func is a null
++pointer. This makes it possible to use a different value-returning
++convention for specific functions when all their calls are
++known.
++
++Note that "library function" in this context means a compiler
++support routine, used to perform arithmetic, whose name is known
++specially by the compiler and was not mentioned in the C code being
++compiled.
++
++The definition of LIBRARY_VALUE need not be concerned with aggregate
++data types, because none of the library functions returns such types.
++*/
++#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
++
++/*
++A C expression that is nonzero if REGNO is the number of a hard
++register in which the values of called function may come back.
++
++A register whose use for returning values is limited to serving as the
++second of a pair (for a value of type double, say) need not be
++recognized by this macro. So for most machines, this definition
++suffices:
++ #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
++
++If the machine has register windows, so that the caller and the called
++function use different registers for the return value, this macro
++should recognize only the caller's register numbers.
++*/
++/*
++ When returning a value of mode DImode, r11:r10 is used, else r12 is used.
++*/
++#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
++ || (REGNO) == INTERNAL_REGNUM(11))
++
++
++/** How Large Values Are Returned **/
++
++
++/*
++Define this macro to be 1 if all structure and union return values must be
++in memory. Since this results in slower code, this should be defined
++only if needed for compatibility with other compilers or with an ABI.
++If you define this macro to be 0, then the conventions used for structure
++and union return values are decided by the RETURN_IN_MEMORY macro.
++
++If not defined, this defaults to the value 1.
++*/
++#define DEFAULT_PCC_STRUCT_RETURN 0
++
++
++
++
++/** Generating Code for Profiling **/
++
++/*
++A C statement or compound statement to output to FILE some
++assembler code to call the profiling subroutine mcount.
++
++The details of how mcount expects to be called are determined by
++your operating system environment, not by GCC. To figure them out,
++compile a small program for profiling using the system's installed C
++compiler and look at the assembler code that results.
++
++Older implementations of mcount expect the address of a counter
++variable to be loaded into some register. The name of this variable is
++'LP' followed by the number LABELNO, so you would generate
++the name using 'LP%d' in a fprintf.
++*/
++/* ToDo: fixme */
++#ifndef FUNCTION_PROFILER
++#define FUNCTION_PROFILER(FILE, LABELNO) \
++ fprintf((FILE), "/* profiler %d */", (LABELNO))
++#endif
++
++
++/*****************************************************************************
++ * Trampolines for Nested Functions *
++ *****************************************************************************/
++
++/*
++A C statement to output, on the stream FILE, assembler code for a
++block of data that contains the constant parts of a trampoline. This
++code should not include a label - the label is taken care of
++automatically.
++
++If you do not define this macro, it means no template is needed
++for the target. Do not define this macro on systems where the block move
++code to copy the trampoline into place would be larger than the code
++to generate it on the spot.
++*/
++/* ToDo: correct? */
++#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE);
++
++
++/*
++A C expression for the size in bytes of the trampoline, as an integer.
++*/
++/* ToDo: fixme */
++#define TRAMPOLINE_SIZE 0x0C
++
++/*
++Alignment required for trampolines, in bits.
++
++If you don't define this macro, the value of BIGGEST_ALIGNMENT
++is used for aligning trampolines.
++*/
++#define TRAMPOLINE_ALIGNMENT 16
++
++/*
++A C statement to initialize the variable parts of a trampoline.
++ADDR is an RTX for the address of the trampoline; FNADDR is
++an RTX for the address of the nested function; STATIC_CHAIN is an
++RTX for the static chain value that should be passed to the function
++when it is called.
++*/
++#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
++ avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
++
++
++/******************************************************************************
++ * Implicit Calls to Library Routines
++ *****************************************************************************/
++
++/* Tail calling. */
++
++/* A C expression that evaluates to true if it is ok to perform a sibling
++ call to DECL. */
++#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
++
++#define OVERRIDE_OPTIONS avr32_override_options ()
++
++#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE)
++
++/******************************************************************************
++ * Addressing Modes
++ *****************************************************************************/
++
++/*
++A C expression that is nonzero if the machine supports pre-increment,
++pre-decrement, post-increment, or post-decrement addressing respectively.
++*/
++/*
++ AVR32 supports Rp++ and --Rp
++*/
++#define HAVE_PRE_INCREMENT 0
++#define HAVE_PRE_DECREMENT 1
++#define HAVE_POST_INCREMENT 1
++#define HAVE_POST_DECREMENT 0
++
++/*
++A C expression that is nonzero if the machine supports pre- or
++post-address side-effect generation involving constants other than
++the size of the memory operand.
++*/
++#define HAVE_PRE_MODIFY_DISP 0
++#define HAVE_POST_MODIFY_DISP 0
++
++/*
++A C expression that is nonzero if the machine supports pre- or
++post-address side-effect generation involving a register displacement.
++*/
++#define HAVE_PRE_MODIFY_REG 0
++#define HAVE_POST_MODIFY_REG 0
++
++/*
++A C expression that is 1 if the RTX X is a constant which
++is a valid address. On most machines, this can be defined as
++CONSTANT_P (X), but a few machines are more restrictive
++in which constant addresses are supported.
++
++CONSTANT_P accepts integer-values expressions whose values are
++not explicitly known, such as symbol_ref, label_ref, and
++high expressions and const arithmetic expressions, in
++addition to const_int and const_double expressions.
++*/
++#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
++
++/*
++A number, the maximum number of registers that can appear in a valid
++memory address. Note that it is up to you to specify a value equal to
++the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
++accept.
++*/
++#define MAX_REGS_PER_ADDRESS 2
++
++/*
++A C compound statement with a conditional goto LABEL;
++executed if X (an RTX) is a legitimate memory address on the
++target machine for a memory operand of mode MODE.
++
++It usually pays to define several simpler macros to serve as
++subroutines for this one. Otherwise it may be too complicated to
++understand.
++
++This macro must exist in two variants: a strict variant and a
++non-strict one. The strict variant is used in the reload pass. It
++must be defined so that any pseudo-register that has not been
++allocated a hard register is considered a memory reference. In
++contexts where some kind of register is required, a pseudo-register
++with no hard register must be rejected.
++
++The non-strict variant is used in other passes. It must be defined to
++accept all pseudo-registers in every context where some kind of
++register is required.
++
++Compiler source files that want to use the strict variant of this
++macro define the macro REG_OK_STRICT. You should use an
++#ifdef REG_OK_STRICT conditional to define the strict variant
++in that case and the non-strict variant otherwise.
++
++Subroutines to check for acceptable registers for various purposes (one
++for base registers, one for index registers, and so on) are typically
++among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
++Then only these subroutine macros need have two variants; the higher
++levels of macros may be the same whether strict or not.
++
++Normally, constant addresses which are the sum of a symbol_ref
++and an integer are stored inside a const RTX to mark them as
++constant. Therefore, there is no need to recognize such sums
++specifically as legitimate addresses. Normally you would simply
++recognize any const as legitimate.
++
++Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
++sums that are not marked with const. It assumes that a naked
++plus indicates indexing. If so, then you must reject such
++naked constant sums as illegitimate addresses, so that none of them will
++be given to PRINT_OPERAND_ADDRESS.
++
++On some machines, whether a symbolic address is legitimate depends on
++the section that the address refers to. On these machines, define the
++macro ENCODE_SECTION_INFO to store the information into the
++symbol_ref, and then check for it here. When you see a
++const, you will have to look inside it to find the
++symbol_ref in order to determine the section.
++
++The best way to modify the name string is by adding text to the
++beginning, with suitable punctuation to prevent any ambiguity. Allocate
++the new name in saveable_obstack. You will have to modify
++ASM_OUTPUT_LABELREF to remove and decode the added text and
++output the name accordingly, and define STRIP_NAME_ENCODING to
++access the original name string.
++
++You can check the information stored here into the symbol_ref in
++the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
++PRINT_OPERAND_ADDRESS.
++*/
++#ifdef REG_OK_STRICT
++# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
++ do \
++ { \
++ if (avr32_legitimate_address(MODE, X, 1)) \
++ goto LABEL; \
++ } \
++ while (0)
++#else
++# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
++ do \
++ { \
++ if (avr32_legitimate_address(MODE, X, 0)) \
++ goto LABEL; \
++ } \
++ while (0)
++#endif
++
++
++
++/*
++A C compound statement that attempts to replace X with a valid
++memory address for an operand of mode MODE. win will be a
++C statement label elsewhere in the code; the macro definition may use
++
++ GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
++
++to avoid further processing if the address has become legitimate.
++
++X will always be the result of a call to break_out_memory_refs,
++and OLDX will be the operand that was given to that function to produce
++X.
++
++The code generated by this macro should not alter the substructure of
++X. If it transforms X into a more legitimate form, it
++should assign X (which will always be a C variable) a new value.
++
++It is not necessary for this macro to come up with a legitimate
++address. The compiler has standard ways of doing so in all cases. In
++fact, it is safe for this macro to do nothing. But often a
++machine-dependent strategy can generate better code.
++*/
++#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
++ do \
++ { \
++ if (GET_CODE(X) == PLUS \
++ && GET_CODE(XEXP(X, 0)) == REG \
++ && GET_CODE(XEXP(X, 1)) == CONST_INT \
++ && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \
++ 'K', "Ks16")) \
++ { \
++ rtx index = force_reg(SImode, XEXP(X, 1)); \
++ X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \
++ } \
++ GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \
++ } \
++ while(0)
++
++
++/*
++A C statement or compound statement with a conditional
++goto LABEL; executed if memory address X (an RTX) can have
++different meanings depending on the machine mode of the memory
++reference it is used for or if the address is valid for some modes
++but not others.
++
++Autoincrement and autodecrement addresses typically have mode-dependent
++effects because the amount of the increment or decrement is the size
++of the operand being addressed. Some machines have other mode-dependent
++addresses. Many RISC machines have no mode-dependent addresses.
++
++You may assume that ADDR is a valid address for the machine.
++*/
++#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
++ do \
++ { \
++ if (GET_CODE (ADDR) == POST_INC \
++ || GET_CODE (ADDR) == PRE_DEC) \
++ goto LABEL; \
++ } \
++ while (0)
++
++/*
++A C expression that is nonzero if X is a legitimate constant for
++an immediate operand on the target machine. You can assume that
++X satisfies CONSTANT_P, so you need not check this. In fact,
++'1' is a suitable definition for this macro on machines where
++anything CONSTANT_P is valid.
++*/
++#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
++
++
++/******************************************************************************
++ * Condition Code Status
++ *****************************************************************************/
++
++/*
++C code for a data type which is used for declaring the mdep
++component of cc_status. It defaults to int.
++
++This macro is not used on machines that do not use cc0.
++*/
++
++typedef struct
++{
++ int flags;
++ rtx value;
++ int cond_exec_cmp_clobbered;
++} avr32_status_reg;
++
++
++#define CC_STATUS_MDEP avr32_status_reg
++
++/*
++A C expression to initialize the mdep field to "empty".
++The default definition does nothing, since most machines don't use
++the field anyway. If you want to use the field, you should probably
++define this macro to initialize it.
++
++This macro is not used on machines that do not use cc0.
++*/
++
++#define CC_STATUS_MDEP_INIT \
++ (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0)
++
++/*
++A C compound statement to set the components of cc_status
++appropriately for an insn INSN whose body is EXP. It is
++this macro's responsibility to recognize insns that set the condition
++code as a byproduct of other activity as well as those that explicitly
++set (cc0).
++
++This macro is not used on machines that do not use cc0.
++
++If there are insns that do not set the condition code but do alter
++other machine registers, this macro must check to see whether they
++invalidate the expressions that the condition code is recorded as
++reflecting. For example, on the 68000, insns that store in address
++registers do not set the condition code, which means that usually
++NOTICE_UPDATE_CC can leave cc_status unaltered for such
++insns. But suppose that the previous insn set the condition code
++based on location 'a4@@(102)' and the current insn stores a new
++value in 'a4'. Although the condition code is not changed by
++this, it will no longer be true that it reflects the contents of
++'a4@@(102)'. Therefore, NOTICE_UPDATE_CC must alter
++cc_status in this case to say that nothing is known about the
++condition code value.
++
++The definition of NOTICE_UPDATE_CC must be prepared to deal
++with the results of peephole optimization: insns whose patterns are
++parallel RTXs containing various reg, mem or
++constants which are just the operands. The RTL structure of these
++insns is not sufficient to indicate what the insns actually do. What
++NOTICE_UPDATE_CC should do when it sees one is just to run
++CC_STATUS_INIT.
++
++A possible definition of NOTICE_UPDATE_CC is to call a function
++that looks at an attribute (see Insn Attributes) named, for example,
++'cc'. This avoids having detailed information about patterns in
++two places, the 'md' file and in NOTICE_UPDATE_CC.
++*/
++
++#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
++
++
++
++
++/******************************************************************************
++ * Describing Relative Costs of Operations
++ *****************************************************************************/
++
++
++
++/*
++A C expression for the cost of moving data of mode MODE from a
++register in class FROM to one in class TO. The classes are
++expressed using the enumeration values such as GENERAL_REGS. A
++value of 2 is the default; other values are interpreted relative to
++that.
++
++It is not required that the cost always equal 2 when FROM is the
++same as TO; on some machines it is expensive to move between
++registers if they are not general registers.
++
++If reload sees an insn consisting of a single set between two
++hard registers, and if REGISTER_MOVE_COST applied to their
++classes returns a value of 2, reload does not check to ensure that the
++constraints of the insn are met. Setting a cost of other than 2 will
++allow reload to verify that the constraints are met. You should do this
++if the movm pattern's constraints do not allow such copying.
++*/
++#define REGISTER_MOVE_COST(MODE, FROM, TO) \
++ ((GET_MODE_SIZE(MODE) <= 4) ? 2: \
++ (GET_MODE_SIZE(MODE) <= 8) ? 3: \
++ 4)
++
++/*
++A C expression for the cost of moving data of mode MODE between a
++register of class CLASS and memory; IN is zero if the value
++is to be written to memory, nonzero if it is to be read in. This cost
++is relative to those in REGISTER_MOVE_COST. If moving between
++registers and memory is more expensive than between two registers, you
++should define this macro to express the relative cost.
++
++If you do not define this macro, GCC uses a default cost of 4 plus
++the cost of copying via a secondary reload register, if one is
++needed. If your machine requires a secondary reload register to copy
++between memory and a register of CLASS but the reload mechanism is
++more complex than copying via an intermediate, define this macro to
++reflect the actual cost of the move.
++
++GCC defines the function memory_move_secondary_cost if
++secondary reloads are needed. It computes the costs due to copying via
++a secondary register. If your machine copies from memory using a
++secondary register in the conventional way but the default base value of
++4 is not correct for your machine, define this macro to add some other
++value to the result of that function. The arguments to that function
++are the same as to this macro.
++*/
++/*
++ Memory moves are costly
++*/
++#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
++ (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \
++ (GET_MODE_SIZE(MODE) > 8) ? 6 : \
++ 3) \
++ : ((GET_MODE_SIZE(MODE) > 8) ? 6 : 3)))
++
++/*
++A C expression for the cost of a branch instruction. A value of 1 is
++the default; other values are interpreted relative to that.
++*/
++ /* Try to use conditionals as much as possible */
++#define BRANCH_COST(speed_p, predictable_p) (TARGET_BRANCH_PRED ? 3 : 4)
++
++/*A C expression for the maximum number of instructions to execute via conditional
++ execution instructions instead of a branch. A value of BRANCH_COST+1 is the default
++ if the machine does not use cc0, and 1 if it does use cc0.*/
++#define MAX_CONDITIONAL_EXECUTE 4
++
++/*
++Define this macro as a C expression which is nonzero if accessing less
++than a word of memory (i.e.: a char or a short) is no
++faster than accessing a word of memory, i.e., if such access
++require more than one instruction or if there is no difference in cost
++between byte and (aligned) word loads.
++
++When this macro is not defined, the compiler will access a field by
++finding the smallest containing object; when it is defined, a fullword
++load will be used if alignment permits. Unless bytes accesses are
++faster than word accesses, using word accesses is preferable since it
++may eliminate subsequent memory access if subsequent accesses occur to
++other fields in the same word of the structure, but to different bytes.
++*/
++#define SLOW_BYTE_ACCESS 1
++
++
++/*
++Define this macro if it is as good or better to call a constant
++function address than to call an address kept in a register.
++*/
++#define NO_FUNCTION_CSE
++
++
++/******************************************************************************
++ * Adjusting the Instruction Scheduler
++ *****************************************************************************/
++
++/*****************************************************************************
++ * Dividing the Output into Sections (Texts, Data, ...) *
++ *****************************************************************************/
++
++/*
++A C expression whose value is a string, including spacing, containing the
++assembler operation that should precede instructions and read-only data.
++Normally "\t.text" is right.
++*/
++#define TEXT_SECTION_ASM_OP "\t.text"
++/*
++A C statement that switches to the default section containing instructions.
++Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
++is enough. The MIPS port uses this to sort all functions after all data
++declarations.
++*/
++/* #define TEXT_SECTION */
++
++/*
++A C expression whose value is a string, including spacing, containing the
++assembler operation to identify the following data as writable initialized
++data. Normally "\t.data" is right.
++*/
++#define DATA_SECTION_ASM_OP "\t.data"
++
++/*
++If defined, a C expression whose value is a string, including spacing,
++containing the assembler operation to identify the following data as
++shared data. If not defined, DATA_SECTION_ASM_OP will be used.
++*/
++
++/*
++A C expression whose value is a string, including spacing, containing
++the assembler operation to identify the following data as read-only
++initialized data.
++*/
++#undef READONLY_DATA_SECTION_ASM_OP
++#define READONLY_DATA_SECTION_ASM_OP \
++ ((TARGET_USE_RODATA_SECTION) ? \
++ "\t.section\t.rodata" : \
++ TEXT_SECTION_ASM_OP )
++
++
++/*
++If defined, a C expression whose value is a string, including spacing,
++containing the assembler operation to identify the following data as
++uninitialized global data. If not defined, and neither
++ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
++uninitialized global data will be output in the data section if
++-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
++used.
++*/
++#define BSS_SECTION_ASM_OP "\t.section\t.bss"
++
++/*
++If defined, a C expression whose value is a string, including spacing,
++containing the assembler operation to identify the following data as
++uninitialized global shared data. If not defined, and
++BSS_SECTION_ASM_OP is, the latter will be used.
++*/
++/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
++/*
++If defined, a C expression whose value is a string, including spacing,
++containing the assembler operation to identify the following data as
++initialization code. If not defined, GCC will assume such a section does
++not exist.
++*/
++#undef INIT_SECTION_ASM_OP
++#define INIT_SECTION_ASM_OP "\t.section\t.init"
++
++/*
++If defined, a C expression whose value is a string, including spacing,
++containing the assembler operation to identify the following data as
++finalization code. If not defined, GCC will assume such a section does
++not exist.
++*/
++#undef FINI_SECTION_ASM_OP
++#define FINI_SECTION_ASM_OP "\t.section\t.fini"
++
++/*
++If defined, an ASM statement that switches to a different section
++via SECTION_OP, calls FUNCTION, and switches back to
++the text section. This is used in crtstuff.c if
++INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP to calls
++to initialization and finalization functions from the init and fini
++sections. By default, this macro uses a simple function call. Some
++ports need hand-crafted assembly code to avoid dependencies on
++registers initialized in the function prologue or to ensure that
++constant pools don't end up too far away in the text section.
++*/
++#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
++ asm ( SECTION_OP "\n" \
++ "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
++ TEXT_SECTION_ASM_OP);
++
++
++/*
++Define this macro to be an expression with a nonzero value if jump
++tables (for tablejump insns) should be output in the text
++section, along with the assembler instructions. Otherwise, the
++readonly data section is used.
++
++This macro is irrelevant if there is no separate readonly data section.
++*/
++/* Put jump tables in text section if we have caches. Otherwise assume that
++ loading data from code memory is slow. */
++#define JUMP_TABLES_IN_TEXT_SECTION \
++ (TARGET_CACHES ? 1 : 0)
++
++
++/******************************************************************************
++ * Position Independent Code (PIC)
++ *****************************************************************************/
++
++#ifndef AVR32_ALWAYS_PIC
++#define AVR32_ALWAYS_PIC 0
++#endif
++
++/* GOT is set to r6 */
++#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
++
++/*
++A C expression that is nonzero if X is a legitimate immediate
++operand on the target machine when generating position independent code.
++You can assume that X satisfies CONSTANT_P, so you need not
++check this. You can also assume flag_pic is true, so you need not
++check it either. You need not define this macro if all constants
++(including SYMBOL_REF) can be immediate operands when generating
++position independent code.
++*/
++/* We can't directly access anything that contains a symbol,
++ nor can we indirect via the constant pool. */
++#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
++
++
++/* We need to know when we are making a constant pool; this determines
++ whether data needs to be in the GOT or can be referenced via a GOT
++ offset. */
++extern int making_const_table;
++
++/******************************************************************************
++ * Defining the Output Assembler Language
++ *****************************************************************************/
++
++
++/*
++A C string constant describing how to begin a comment in the target
++assembler language. The compiler assumes that the comment will end at
++the end of the line.
++*/
++#define ASM_COMMENT_START "# "
++
++/*
++A C string constant for text to be output before each asm
++statement or group of consecutive ones. Normally this is
++"#APP", which is a comment that has no effect on most
++assemblers but tells the GNU assembler that it must check the lines
++that follow for all valid assembler constructs.
++*/
++#undef ASM_APP_ON
++#define ASM_APP_ON "#APP\n"
++
++/*
++A C string constant for text to be output after each asm
++statement or group of consecutive ones. Normally this is
++"#NO_APP", which tells the GNU assembler to resume making the
++time-saving assumptions that are valid for ordinary compiler output.
++*/
++#undef ASM_APP_OFF
++#define ASM_APP_OFF "#NO_APP\n"
++
++
++
++#define FILE_ASM_OP "\t.file\n"
++#define IDENT_ASM_OP "\t.ident\t"
++#define SET_ASM_OP "\t.set\t"
++
++
++/*
++ * Output assembly directives to switch to section name. The section
++ * should have attributes as specified by flags, which is a bit mask
++ * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
++ * it contains an alignment in bytes to be used for the section,
++ * otherwise some target default should be used. Only targets that
++ * must specify an alignment within the section directive need pay
++ * attention to align -- we will still use ASM_OUTPUT_ALIGN.
++ *
++ * NOTE: This one must not be moved to avr32.c
++ */
++#undef TARGET_ASM_NAMED_SECTION
++#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
++
++
++/*
++You may define this macro as a C expression. You should define the
++expression to have a nonzero value if GCC should output the constant
++pool for a function before the code for the function, or a zero value if
++GCC should output the constant pool after the function. If you do
++not define this macro, the usual case, GCC will output the constant
++pool before the function.
++*/
++#define CONSTANT_POOL_BEFORE_FUNCTION 0
++
++
++/*
++Define this macro as a C expression which is nonzero if the constant
++EXP, of type tree, should be output after the code for a
++function. The compiler will normally output all constants before the
++function; you need not define this macro if this is OK.
++*/
++#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
++
++
++/*
++Define this macro as a C expression which is nonzero if C is used
++as a logical line separator by the assembler. STR points to the
++position in the string where C was found; this can be used if a
++line separator uses multiple characters.
++
++If you do not define this macro, the default is that only
++the character ';' is treated as a logical line separator.
++*/
++#define IS_ASM_LOGICAL_LINE_SEPARATOR(C,STR) (((C) == '\n') || ((C) == ';'))
++
++
++/** Output of Uninitialized Variables **/
++
++/*
++A C statement (sans semicolon) to output to the stdio stream
++STREAM the assembler definition of a common-label named
++NAME whose size is SIZE bytes. The variable ROUNDED
++is the size rounded up to whatever alignment the caller wants.
++
++Use the expression assemble_name(STREAM, NAME) to
++output the name itself; before and after that, output the additional
++assembler syntax for defining the name, and a newline.
++
++This macro controls how the assembler definitions of uninitialized
++common global variables are output.
++*/
++/*
++#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
++ avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
++*/
++
++#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
++ do \
++ { \
++ fputs ("\t.comm ", (FILE)); \
++ assemble_name ((FILE), (NAME)); \
++ fprintf ((FILE), ",%d\n", (SIZE)); \
++ } \
++ while (0)
++
++/*
++ * Like ASM_OUTPUT_BSS except takes the required alignment as a
++ * separate, explicit argument. If you define this macro, it is used
++ * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
++ * handling the required alignment of the variable. The alignment is
++ * specified as the number of bits.
++ *
++ * Try to use function asm_output_aligned_bss defined in file varasm.c
++ * when defining this macro.
++ */
++#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
++ asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
++
++/*
++A C statement (sans semicolon) to output to the stdio stream
++STREAM the assembler definition of a local-common-label named
++NAME whose size is SIZE bytes. The variable ROUNDED
++is the size rounded up to whatever alignment the caller wants.
++
++Use the expression assemble_name(STREAM, NAME) to
++output the name itself; before and after that, output the additional
++assembler syntax for defining the name, and a newline.
++
++This macro controls how the assembler definitions of uninitialized
++static variables are output.
++*/
++#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
++ do \
++ { \
++ fputs ("\t.lcomm ", (FILE)); \
++ assemble_name ((FILE), (NAME)); \
++ fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \
++ } \
++ while (0)
++
++
++/*
++A C statement (sans semicolon) to output to the stdio stream
++STREAM the assembler definition of a label named NAME.
++Use the expression assemble_name(STREAM, NAME) to
++output the name itself; before and after that, output the additional
++assembler syntax for defining the name, and a newline.
++*/
++#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
++
++/* A C string containing the appropriate assembler directive to
++ * specify the size of a symbol, without any arguments. On systems
++ * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
++ * on other systems, the default is not to define this macro.
++ *
++ * Define this macro only if it is correct to use the default
++ * definitions of ASM_ OUTPUT_SIZE_DIRECTIVE and
++ * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
++ * custom definitions of those macros, or if you do not need explicit
++ * symbol sizes at all, do not define this macro.
++ */
++#define SIZE_ASM_OP "\t.size\t"
++
++
++/*
++A C statement (sans semicolon) to output to the stdio stream
++STREAM some commands that will make the label NAME global;
++that is, available for reference from other files. Use the expression
++assemble_name(STREAM, NAME) to output the name
++itself; before and after that, output the additional assembler syntax
++for making that name global, and a newline.
++*/
++#define GLOBAL_ASM_OP "\t.global\t"
++
++
++
++/*
++A C expression which evaluates to true if the target supports weak symbols.
++
++If you don't define this macro, defaults.h provides a default
++definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
++is defined, the default definition is '1'; otherwise, it is
++'0'. Define this macro if you want to control weak symbol support
++with a compiler flag such as -melf.
++*/
++#define SUPPORTS_WEAK 1
++
++/*
++A C statement (sans semicolon) to output to the stdio stream
++STREAM a reference in assembler syntax to a label named
++NAME. This should add '_' to the front of the name, if that
++is customary on your operating system, as it is in most Berkeley Unix
++systems. This macro is used in assemble_name.
++*/
++#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
++ avr32_asm_output_labelref(STREAM, NAME)
++
++
++
++/*
++A C expression to assign to OUTVAR (which is a variable of type
++char *) a newly allocated string made from the string
++NAME and the number NUMBER, with some suitable punctuation
++added. Use alloca to get space for the string.
++
++The string will be used as an argument to ASM_OUTPUT_LABELREF to
++produce an assembler label for an internal static variable whose name is
++NAME. Therefore, the string must be such as to result in valid
++assembler code. The argument NUMBER is different each time this
++macro is executed; it prevents conflicts between similarly-named
++internal static variables in different scopes.
++
++Ideally this string should not be a valid C identifier, to prevent any
++conflict with the user's own symbols. Most assemblers allow periods
++or percent signs in assembler symbols; putting at least one of these
++between the name and the number will suffice.
++*/
++#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \
++ do \
++ { \
++ (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \
++ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \
++ } \
++ while (0)
++
++
++/** Macros Controlling Initialization Routines **/
++
++
++/*
++If defined, main will not call __main as described above.
++This macro should be defined for systems that control start-up code
++on a symbol-by-symbol basis, such as OSF/1, and should not
++be defined explicitly for systems that support INIT_SECTION_ASM_OP.
++*/
++/*
++ __main is not defined when debugging.
++*/
++#define HAS_INIT_SECTION
++
++
++/** Output of Assembler Instructions **/
++
++/*
++A C initializer containing the assembler's names for the machine
++registers, each one as a C string constant. This is what translates
++register numbers in the compiler into assembler language.
++*/
++
++#define REGISTER_NAMES \
++{ \
++ "pc", "lr", \
++ "sp", "r12", \
++ "r11", "r10", \
++ "r9", "r8", \
++ "r7", "r6", \
++ "r5", "r4", \
++ "r3", "r2", \
++ "r1", "r0", \
++}
++
++/*
++A C compound statement to output to stdio stream STREAM the
++assembler syntax for an instruction operand X. X is an
++RTL expression.
++
++CODE is a value that can be used to specify one of several ways
++of printing the operand. It is used when identical operands must be
++printed differently depending on the context. CODE comes from
++the '%' specification that was used to request printing of the
++operand. If the specification was just '%digit' then
++CODE is 0; if the specification was '%ltr digit'
++then CODE is the ASCII code for ltr.
++
++If X is a register, this macro should print the register's name.
++The names can be found in an array reg_names whose type is
++char *[]. reg_names is initialized from REGISTER_NAMES.
++
++When the machine description has a specification '%punct'
++(a '%' followed by a punctuation character), this macro is called
++with a null pointer for X and the punctuation character for
++CODE.
++*/
++#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
++
++/* A C statement to be executed just prior to the output of
++ assembler code for INSN, to modify the extracted operands so
++ they will be output differently.
++
++ Here the argument OPVEC is the vector containing the operands
++ extracted from INSN, and NOPERANDS is the number of elements of
++ the vector which contain meaningful data for this insn.
++ The contents of this vector are what will be used to convert the insn
++ template into assembler code, so you can change the assembler output
++ by changing the contents of the vector. */
++#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
++ avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
++
++/*
++A C expression which evaluates to true if CODE is a valid
++punctuation character for use in the PRINT_OPERAND macro. If
++PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
++punctuation characters (except for the standard one, '%') are used
++in this way.
++*/
++#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
++ (((CODE) == '?') \
++ || ((CODE) == '!'))
++
++/*
++A C compound statement to output to stdio stream STREAM the
++assembler syntax for an instruction operand that is a memory reference
++whose address is X. X is an RTL expression.
++
++On some machines, the syntax for a symbolic address depends on the
++section that the address refers to. On these machines, define the macro
++ENCODE_SECTION_INFO to store the information into the
++symbol_ref, and then check for it here. (see Assembler Format.)
++*/
++#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
++
++
++/** Output of Dispatch Tables **/
++
++/*
++ * A C statement to output to the stdio stream stream an assembler
++ * pseudo-instruction to generate a difference between two
++ * labels. value and rel are the numbers of two internal labels. The
++ * definitions of these labels are output using
++ * (*targetm.asm_out.internal_label), and they must be printed in the
++ * same way here. For example,
++ *
++ * fprintf (stream, "\t.word L%d-L%d\n",
++ * value, rel)
++ *
++ * You must provide this macro on machines where the addresses in a
++ * dispatch table are relative to the table's own address. If defined,
++ * GCC will also use this macro on all machines when producing
++ * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that
++ * the mode and flags can be read.
++ */
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
++ fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
++
++/*
++This macro should be provided on machines where the addresses
++in a dispatch table are absolute.
++
++The definition should be a C statement to output to the stdio stream
++STREAM an assembler pseudo-instruction to generate a reference to
++a label. VALUE is the number of an internal label whose
++definition is output using ASM_OUTPUT_INTERNAL_LABEL.
++For example,
++
++fprintf(STREAM, "\t.word L%d\n", VALUE)
++*/
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
++ fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
++
++/** Assembler Commands for Exception Regions */
++
++/* ToDo: All of this subsection */
++
++/** Assembler Commands for Alignment */
++
++
++/*
++A C statement to output to the stdio stream STREAM an assembler
++command to advance the location counter to a multiple of 2 to the
++POWER bytes. POWER will be a C expression of type int.
++*/
++#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
++ do \
++ { \
++ if ((POWER) != 0) \
++ fprintf(STREAM, "\t.align\t%d\n", POWER); \
++ } \
++ while (0)
++
++/*
++Like ASM_OUTPUT_ALIGN, except that the "nop" instruction is used for padding, if
++necessary.
++*/
++#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
++ fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER))
++
++
++
++/******************************************************************************
++ * Controlling Debugging Information Format
++ *****************************************************************************/
++
++/* How to renumber registers for dbx and gdb. */
++#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
++
++/* The DWARF 2 CFA column which tracks the return address. */
++#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
++
++/*
++Define this macro if GCC should produce dwarf version 2 format
++debugging output in response to the -g option.
++
++To support optional call frame debugging information, you must also
++define INCOMING_RETURN_ADDR_RTX and either set
++RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
++prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
++as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
++*/
++#define DWARF2_DEBUGGING_INFO 1
++
++
++#define DWARF2_ASM_LINE_DEBUG_INFO 1
++#define DWARF2_FRAME_INFO 1
++
++
++/******************************************************************************
++ * Miscellaneous Parameters
++ *****************************************************************************/
++
++/* ToDo: a lot */
++
++/*
++An alias for a machine mode name. This is the machine mode that
++elements of a jump-table should have.
++*/
++#define CASE_VECTOR_MODE SImode
++
++/*
++Define this macro to be a C expression to indicate when jump-tables
++should contain relative addresses. If jump-tables never contain
++relative addresses, then you need not define this macro.
++*/
++#define CASE_VECTOR_PC_RELATIVE 0
++
++/* Increase the threshold for using table jumps on the UC arch. */
++#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7)
++
++/*
++The maximum number of bytes that a single instruction can move quickly
++between memory and registers or between two memory locations.
++*/
++#define MOVE_MAX (2*UNITS_PER_WORD)
++
++
++/* A C expression that is nonzero if on this machine the number of bits actually used
++ for the count of a shift operation is equal to the number of bits needed to represent
++ the size of the object being shifted. When this macro is nonzero, the compiler will
++ assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
++ instructions that truncate the count of a shift operation. On machines that have
++ instructions that act on bit-fields at variable positions, which may include 'bit test'
++
++ instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations
++ of the values that serve as arguments to bit-field instructions.
++ If both types of instructions truncate the count (for shifts) and position (for bit-field
++ operations), or if no variable-position bit-field instructions exist, you should define
++ this macro.
++ However, on some machines, such as the 80386 and the 680x0, truncation only applies
++ to shift operations and not the (real or pretended) bit-field operations. Define
++ SHIFT_COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md' file
++ that include the implied truncation of the shift instructions.
++ You need not define this macro if it would always have the value of zero. */
++#define SHIFT_COUNT_TRUNCATED 1
++
++/*
++A C expression which is nonzero if on this machine it is safe to
++convert an integer of INPREC bits to one of OUTPREC
++bits (where OUTPREC is smaller than INPREC) by merely
++operating on it as if it had only OUTPREC bits.
++
++On many machines, this expression can be 1.
++
++When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
++modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
++If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
++such cases may improve things.
++*/
++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
++
++/*
++An alias for the machine mode for pointers. On most machines, define
++this to be the integer mode corresponding to the width of a hardware
++pointer; SImode on 32-bit machine or DImode on 64-bit machines.
++On some machines you must define this to be one of the partial integer
++modes, such as PSImode.
++
++The width of Pmode must be at least as large as the value of
++POINTER_SIZE. If it is not equal, you must define the macro
++POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
++to Pmode.
++*/
++#define Pmode SImode
++
++/*
++An alias for the machine mode used for memory references to functions
++being called, in call RTL expressions. On most machines this
++should be QImode.
++*/
++#define FUNCTION_MODE SImode
++
++
++#define REG_S_P(x) \
++ (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
++
++
++/* If defined, modifies the length assigned to instruction INSN as a
++ function of the context in which it is used. LENGTH is an lvalue
++ that contains the initially computed length of the insn and should
++ be updated with the correct length of the insn. */
++#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
++ ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
++
++
++#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
++ (value = 32, (mode == SImode))
++
++#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
++ (value = 32, (mode == SImode))
++
++#define UNITS_PER_SIMD_WORD(mode) UNITS_PER_WORD
++
++#define STORE_FLAG_VALUE 1
++
++
++/* IF-conversion macros. */
++#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \
++ { \
++ (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \
++ }
++
++#define IFCVT_EXTRA_FIELDS \
++ int num_cond_clobber_insns; \
++ int num_extra_move_insns; \
++ rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \
++ rtx moved_insns[MAX_CONDITIONAL_EXECUTE];
++
++#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \
++ { \
++ (CE_INFO)->num_cond_clobber_insns = 0; \
++ (CE_INFO)->num_extra_move_insns = 0; \
++ }
++
++
++#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes)
++
++#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1
++#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD)
++
++enum avr32_builtins
++{
++ AVR32_BUILTIN_MTSR,
++ AVR32_BUILTIN_MFSR,
++ AVR32_BUILTIN_MTDR,
++ AVR32_BUILTIN_MFDR,
++ AVR32_BUILTIN_CACHE,
++ AVR32_BUILTIN_SYNC,
++ AVR32_BUILTIN_SSRF,
++ AVR32_BUILTIN_CSRF,
++ AVR32_BUILTIN_TLBR,
++ AVR32_BUILTIN_TLBS,
++ AVR32_BUILTIN_TLBW,
++ AVR32_BUILTIN_BREAKPOINT,
++ AVR32_BUILTIN_XCHG,
++ AVR32_BUILTIN_LDXI,
++ AVR32_BUILTIN_BSWAP16,
++ AVR32_BUILTIN_BSWAP32,
++ AVR32_BUILTIN_COP,
++ AVR32_BUILTIN_MVCR_W,
++ AVR32_BUILTIN_MVRC_W,
++ AVR32_BUILTIN_MVCR_D,
++ AVR32_BUILTIN_MVRC_D,
++ AVR32_BUILTIN_MULSATHH_H,
++ AVR32_BUILTIN_MULSATHH_W,
++ AVR32_BUILTIN_MULSATRNDHH_H,
++ AVR32_BUILTIN_MULSATRNDWH_W,
++ AVR32_BUILTIN_MULSATWH_W,
++ AVR32_BUILTIN_MACSATHH_W,
++ AVR32_BUILTIN_SATADD_H,
++ AVR32_BUILTIN_SATSUB_H,
++ AVR32_BUILTIN_SATADD_W,
++ AVR32_BUILTIN_SATSUB_W,
++ AVR32_BUILTIN_MULWH_D,
++ AVR32_BUILTIN_MULNWH_D,
++ AVR32_BUILTIN_MACWH_D,
++ AVR32_BUILTIN_MACHH_D,
++ AVR32_BUILTIN_MUSFR,
++ AVR32_BUILTIN_MUSTR,
++ AVR32_BUILTIN_SATS,
++ AVR32_BUILTIN_SATU,
++ AVR32_BUILTIN_SATRNDS,
++ AVR32_BUILTIN_SATRNDU,
++ AVR32_BUILTIN_MEMS,
++ AVR32_BUILTIN_MEMC,
++ AVR32_BUILTIN_MEMT,
++ AVR32_BUILTIN_SLEEP,
++ AVR32_BUILTIN_DELAY_CYCLES
++};
++
++
++#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
++ ((MODE == SFmode) || (MODE == DFmode))
++
++#define RENAME_LIBRARY_SET ".set"
++
++/* Make ABI_NAME an alias for __GCC_NAME. */
++#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \
++ __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \
++ ".set\t__avr32_" #ABI_NAME \
++ ", __" #GCC_NAME "\n");
++
++/* Give libgcc functions avr32 ABI name. */
++#ifdef L_muldi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
++#endif
++#ifdef L_divdi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
++#endif
++#ifdef L_udivdi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
++#endif
++#ifdef L_moddi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
++#endif
++#ifdef L_umoddi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
++#endif
++#ifdef L_ashldi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
++#endif
++#ifdef L_lshrdi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
++#endif
++#ifdef L_ashrdi3
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
++#endif
++
++#ifdef L_fixsfdi
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
++#endif
++#ifdef L_fixunssfdi
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
++#endif
++#ifdef L_floatdidf
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
++#endif
++#ifdef L_floatdisf
++#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
++#endif
++
++#endif
+--- /dev/null
++++ b/gcc/config/avr32/avr32.md
+@@ -0,0 +1,5198 @@
++;; AVR32 machine description file.
++;; Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++;;
++;; This file is part of GCC.
++;;
++;; This program is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 2 of the License, or
++;; (at your option) any later version.
++;;
++;; This program is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with this program; if not, write to the Free Software
++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++
++;; -*- Mode: Scheme -*-
++
++(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
++ (const_string "alu"))
++
++
++(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
++ (const_string "none"))
++
++
++; NB! Keep this in sync with enum architecture_type in avr32.h
++(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul,ucr3,ucr3fp"
++ (const (symbol_ref "avr32_arch->arch_type")))
++
++; Insn length in bytes
++(define_attr "length" ""
++ (const_int 4))
++
++; Signal if an insn is predicable and hence can be conditionally executed.
++(define_attr "predicable" "no,yes" (const_string "no"))
++
++;; Uses of UNSPEC in this file:
++(define_constants
++ [(UNSPEC_PUSHM 0)
++ (UNSPEC_POPM 1)
++ (UNSPEC_UDIVMODSI4_INTERNAL 2)
++ (UNSPEC_DIVMODSI4_INTERNAL 3)
++ (UNSPEC_STM 4)
++ (UNSPEC_LDM 5)
++ (UNSPEC_MOVSICC 6)
++ (UNSPEC_ADDSICC 7)
++ (UNSPEC_COND_MI 8)
++ (UNSPEC_COND_PL 9)
++ (UNSPEC_PIC_SYM 10)
++ (UNSPEC_PIC_BASE 11)
++ (UNSPEC_STORE_MULTIPLE 12)
++ (UNSPEC_STMFP 13)
++ (UNSPEC_FRCPA 14)
++ (UNSPEC_REG_TO_CC 15)
++ (UNSPEC_FORCE_MINIPOOL 16)
++ (UNSPEC_SATS 17)
++ (UNSPEC_SATU 18)
++ (UNSPEC_SATRNDS 19)
++ (UNSPEC_SATRNDU 20)
++ ])
++
++(define_constants
++ [(VUNSPEC_EPILOGUE 0)
++ (VUNSPEC_CACHE 1)
++ (VUNSPEC_MTSR 2)
++ (VUNSPEC_MFSR 3)
++ (VUNSPEC_BLOCKAGE 4)
++ (VUNSPEC_SYNC 5)
++ (VUNSPEC_TLBR 6)
++ (VUNSPEC_TLBW 7)
++ (VUNSPEC_TLBS 8)
++ (VUNSPEC_BREAKPOINT 9)
++ (VUNSPEC_MTDR 10)
++ (VUNSPEC_MFDR 11)
++ (VUNSPEC_MVCR 12)
++ (VUNSPEC_MVRC 13)
++ (VUNSPEC_COP 14)
++ (VUNSPEC_ALIGN 15)
++ (VUNSPEC_POOL_START 16)
++ (VUNSPEC_POOL_END 17)
++ (VUNSPEC_POOL_4 18)
++ (VUNSPEC_POOL_8 19)
++ (VUNSPEC_POOL_16 20)
++ (VUNSPEC_MUSFR 21)
++ (VUNSPEC_MUSTR 22)
++ (VUNSPEC_SYNC_CMPXCHG 23)
++ (VUNSPEC_SYNC_SET_LOCK_AND_LOAD 24)
++ (VUNSPEC_SYNC_STORE_IF_LOCK 25)
++ (VUNSPEC_EH_RETURN 26)
++ (VUNSPEC_FRS 27)
++ (VUNSPEC_CSRF 28)
++ (VUNSPEC_SSRF 29)
++ (VUNSPEC_SLEEP 30)
++ (VUNSPEC_DELAY_CYCLES 31)
++ (VUNSPEC_DELAY_CYCLES_1 32)
++ (VUNSPEC_DELAY_CYCLES_2 33)
++ (VUNSPEC_NOP 34)
++ (VUNSPEC_NOP3 35)
++ ])
++
++(define_constants
++ [
++ ;; R7 = 15-7 = 8
++ (FP_REGNUM 8)
++ ;; Return Register = R12 = 15 - 12 = 3
++ (RETVAL_REGNUM 3)
++ ;; SP = R13 = 15 - 13 = 2
++ (SP_REGNUM 2)
++ ;; LR = R14 = 15 - 14 = 1
++ (LR_REGNUM 1)
++ ;; PC = R15 = 15 - 15 = 0
++ (PC_REGNUM 0)
++ ;; FPSR = GENERAL_REGS + 1 = 17
++ (FPCC_REGNUM 17)
++ ])
++
++
++
++
++;;******************************************************************************
++;; Macros
++;;******************************************************************************
++
++;; Integer Modes for basic alu insns
++(define_mode_iterator INTM [SI HI QI])
++(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
++
++;; Move word modes
++(define_mode_iterator MOVM [SI V2HI V4QI])
++
++;; For mov/addcc insns
++(define_mode_iterator ADDCC [SI HI QI])
++(define_mode_iterator MOVCC [SF SI HI QI])
++(define_mode_iterator CMP [DI SI HI QI])
++(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")])
++(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")])
++(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")])
++(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")])
++(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")])
++(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")])
++(define_mode_attr cmp_predicate [(DI "register_immediate_operand")
++ (SI "register_const_int_operand")
++ (HI "register_operand")
++ (QI "register_operand")])
++(define_mode_attr cmp_length [(DI "6")
++ (SI "4")
++ (HI "4")
++ (QI "4")])
++
++;; For all conditional insns
++(define_code_iterator any_cond_b [ge lt geu ltu])
++(define_code_iterator any_cond [gt ge lt le gtu geu ltu leu])
++(define_code_iterator any_cond4 [gt le gtu leu])
++(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
++ (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
++(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
++ (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
++
++;; For logical operations
++(define_code_iterator logical [and ior xor])
++(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
++
++;; Predicable operations with three register operands
++(define_code_iterator predicable_op3 [and ior xor plus minus])
++(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")])
++(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")])
++
++;; Load the predicates
++(include "predicates.md")
++
++
++;;******************************************************************************
++;; Automaton pipeline description for avr32
++;;******************************************************************************
++
++(define_automaton "avr32_ap")
++
++
++(define_cpu_unit "is" "avr32_ap")
++(define_cpu_unit "a1,m1,da" "avr32_ap")
++(define_cpu_unit "a2,m2,d" "avr32_ap")
++
++;;Alu instructions
++(define_insn_reservation "alu_op" 1
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "alu"))
++ "is,a1,a2")
++
++(define_insn_reservation "alu2_op" 2
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "alu2"))
++ "is,is+a1,a1+a2,a2")
++
++(define_insn_reservation "alu_sat_op" 2
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "alu_sat"))
++ "is,a1,a2")
++
++
++;;Mul instructions
++(define_insn_reservation "mulhh_op" 2
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "mulhh,mulwh"))
++ "is,m1,m2")
++
++(define_insn_reservation "mulww_w_op" 3
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "mulww_w"))
++ "is,m1,m1+m2,m2")
++
++(define_insn_reservation "mulww_d_op" 5
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "mulww_d"))
++ "is,m1,m1+m2,m1+m2,m2,m2")
++
++(define_insn_reservation "div_op" 33
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "div"))
++ "is,m1,m1*31 + m2*31,m2")
++
++(define_insn_reservation "machh_w_op" 3
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "machh_w"))
++ "is*2,m1,m2")
++
++
++(define_insn_reservation "macww_w_op" 4
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "macww_w"))
++ "is*2,m1,m1,m2")
++
++
++(define_insn_reservation "macww_d_op" 6
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "macww_d"))
++ "is*2,m1,m1+m2,m1+m2,m2")
++
++;;Bypasses for Mac instructions, because of accumulator cache.
++;;Set latency as low as possible in order to let the compiler let
++;;mul -> mac and mac -> mac combinations which use the same
++;;accumulator cache be placed close together to avoid any
++;;instructions which can ruin the accumulator cache come inbetween.
++(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
++(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
++(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
++
++(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
++(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
++(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
++
++
++;;Bypasses for all mul/mac instructions followed by an instruction
++;;which reads the output AND writes the result to the same register.
++;;This will generate an Write After Write hazard which gives an
++;;extra cycle before the result is ready.
++(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
++(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
++(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
++
++(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
++(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
++(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
++
++;;Branch and call instructions
++;;We assume that all branches and rcalls are predicted correctly :-)
++;;while calls use a lot of cycles.
++(define_insn_reservation "branch_op" 0
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "branch"))
++ "nothing")
++
++(define_insn_reservation "call_op" 10
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "call"))
++ "nothing")
++
++
++;;Load store instructions
++(define_insn_reservation "load_op" 2
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "load"))
++ "is,da,d")
++
++(define_insn_reservation "load_rm_op" 3
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "load_rm"))
++ "is,da,d")
++
++
++(define_insn_reservation "store_op" 0
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "store"))
++ "is,da,d")
++
++
++(define_insn_reservation "load_double_op" 3
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "load2"))
++ "is,da,da+d,d")
++
++(define_insn_reservation "load_quad_op" 4
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "load4"))
++ "is,da,da+d,da+d,d")
++
++(define_insn_reservation "store_double_op" 0
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "store2"))
++ "is,da,da+d,d")
++
++
++(define_insn_reservation "store_quad_op" 0
++ (and (eq_attr "pipeline" "ap")
++ (eq_attr "type" "store4"))
++ "is,da,da+d,da+d,d")
++
++;;For store the operand to write to memory is read in d and
++;;the real latency between any instruction and a store is therefore
++;;one less than for the instructions which reads the operands in the first
++;;excecution stage
++(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
++(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
++(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
++(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
++(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
++(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
++(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
++(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
++(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
++(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
++(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
++(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
++
++
++; Bypass for load double operation. If only the first loaded word is needed
++; then the latency is 2
++(define_bypass 2 "load_double_op"
++ "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
++ mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
++ "avr32_valid_load_double_bypass")
++
++; Bypass for load quad operation. If only the first or second loaded word is needed
++; we set the latency to 2
++(define_bypass 2 "load_quad_op"
++ "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
++ mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
++ "avr32_valid_load_quad_bypass")
++
++
++;;******************************************************************************
++;; End of Automaton pipeline description for avr32
++;;******************************************************************************
++
++(define_cond_exec
++ [(match_operator 0 "avr32_comparison_operator"
++ [(match_operand:CMP 1 "register_operand" "r")
++ (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])]
++ "TARGET_V2_INSNS"
++ "%!"
++)
++
++(define_cond_exec
++ [(match_operator 0 "avr32_comparison_operator"
++ [(and:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "one_bit_set_operand" "i"))
++ (const_int 0)])]
++ "TARGET_V2_INSNS"
++ "%!"
++ )
++
++;;=============================================================================
++;; move
++;;-----------------------------------------------------------------------------
++
++
++;;== char - 8 bits ============================================================
++(define_expand "movqi"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "")
++ (match_operand:QI 1 "general_operand" ""))]
++ ""
++ {
++ if ( can_create_pseudo_p () ){
++ if (GET_CODE (operands[1]) == MEM && optimize){
++ rtx reg = gen_reg_rtx (SImode);
++
++ emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
++ operands[1] = gen_lowpart (QImode, reg);
++ }
++
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) == MEM)
++ operands[1] = force_reg (QImode, operands[1]);
++ }
++
++ })
++
++(define_insn "*movqi_internal"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
++ (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
++ "register_operand (operands[0], QImode)
++ || register_operand (operands[1], QImode)"
++ "@
++ mov\t%0, %1
++ ld.ub\t%0, %1
++ st.b\t%0, %1
++ mov\t%0, %1"
++ [(set_attr "length" "2,4,4,4")
++ (set_attr "type" "alu,load_rm,store,alu")])
++
++
++
++;;== short - 16 bits ==========================================================
++(define_expand "movhi"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "")
++ (match_operand:HI 1 "general_operand" ""))]
++ ""
++ {
++ if ( can_create_pseudo_p () ){
++ if (GET_CODE (operands[1]) == MEM && optimize){
++ rtx reg = gen_reg_rtx (SImode);
++
++ emit_insn (gen_extendhisi2 (reg, operands[1]));
++ operands[1] = gen_lowpart (HImode, reg);
++ }
++
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) == MEM)
++ operands[1] = force_reg (HImode, operands[1]);
++ }
++
++ })
++
++
++(define_insn "*movhi_internal"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
++ (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
++ "register_operand (operands[0], HImode)
++ || register_operand (operands[1], HImode)"
++ "@
++ mov\t%0, %1
++ ld.sh\t%0, %1
++ st.h\t%0, %1
++ mov\t%0, %1"
++ [(set_attr "length" "2,4,4,4")
++ (set_attr "type" "alu,load_rm,store,alu")])
++
++
++;;== int - 32 bits ============================================================
++
++(define_expand "movmisalignsi"
++ [(set (match_operand:SI 0 "nonimmediate_operand" "")
++ (match_operand:SI 1 "nonimmediate_operand" ""))]
++ "TARGET_UNALIGNED_WORD"
++ {
++ }
++)
++
++(define_expand "mov<mode>"
++ [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "")
++ (match_operand:MOVM 1 "avr32_non_rmw_general_operand" ""))]
++ ""
++ {
++
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) == MEM)
++ operands[1] = force_reg (<MODE>mode, operands[1]);
++
++ /* Check for out of range immediate constants as these may
++ occur during reloading, since it seems like reload does
++ not check if the immediate is legitimate. Don't know if
++ this is a bug? */
++ if ( reload_in_progress
++ && avr32_imm_in_const_pool
++ && GET_CODE(operands[1]) == CONST_INT
++ && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
++ operands[1] = force_const_mem(SImode, operands[1]);
++ }
++ /* Check for RMW memory operands. They are not allowed for mov operations
++ only the atomic memc/s/t operations */
++ if ( !reload_in_progress
++ && avr32_rmw_memory_operand (operands[0], <MODE>mode) ){
++ operands[0] = copy_rtx (operands[0]);
++ XEXP(operands[0], 0) = force_reg (<MODE>mode, XEXP(operands[0], 0));
++ }
++
++ if ( !reload_in_progress
++ && avr32_rmw_memory_operand (operands[1], <MODE>mode) ){
++ operands[1] = copy_rtx (operands[1]);
++ XEXP(operands[1], 0) = force_reg (<MODE>mode, XEXP(operands[1], 0));
++ }
++ if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
++ && !avr32_legitimate_pic_operand_p(operands[1]) )
++ operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
++ (can_create_pseudo_p () ? 0: operands[0]));
++ else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
++ /* If we have an address operand then this function uses the pic register. */
++ crtl->uses_pic_offset_table = 1;
++ })
++
++
++(define_insn "mov<mode>_internal"
++ [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "=r, r, r,r,r,Q,r")
++ (match_operand:MOVM 1 "avr32_non_rmw_general_operand" "rKs08,Ks21,J,n,Q,r,W"))]
++ "(register_operand (operands[0], <MODE>mode)
++ || register_operand (operands[1], <MODE>mode))
++ && !avr32_rmw_memory_operand (operands[0], <MODE>mode)
++ && !avr32_rmw_memory_operand (operands[1], <MODE>mode)"
++ {
++ switch (which_alternative) {
++ case 0:
++ case 1: return "mov\t%0, %1";
++ case 2:
++ if ( TARGET_V2_INSNS )
++ return "movh\t%0, hi(%1)";
++ /* Fallthrough */
++ case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
++ case 4:
++ if ( (REG_P(XEXP(operands[1], 0))
++ && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
++ || (GET_CODE(XEXP(operands[1], 0)) == PLUS
++ && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
++ && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
++ return "lddsp\t%0, %1";
++ else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
++ return "lddpc\t%0, %1";
++ else
++ return "ld.w\t%0, %1";
++ case 5:
++ if ( (REG_P(XEXP(operands[0], 0))
++ && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
++ || (GET_CODE(XEXP(operands[0], 0)) == PLUS
++ && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
++ && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
++ return "stdsp\t%0, %1";
++ else
++ return "st.w\t%0, %1";
++ case 6:
++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
++ return "lda.w\t%0, %1";
++ else
++ return "ld.w\t%0, r6[%1@got]";
++ default:
++ abort();
++ }
++ }
++
++ [(set_attr "length" "2,4,4,8,4,4,8")
++ (set_attr "type" "alu,alu,alu,alu2,load,store,load")
++ (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")])
++
++
++(define_expand "reload_out_rmw_memory_operand"
++ [(set (match_operand:SI 2 "register_operand" "=r")
++ (match_operand:SI 0 "address_operand" ""))
++ (set (mem:SI (match_dup 2))
++ (match_operand:SI 1 "register_operand" ""))]
++ ""
++ {
++ operands[0] = XEXP(operands[0], 0);
++ }
++)
++
++(define_expand "reload_in_rmw_memory_operand"
++ [(set (match_operand:SI 2 "register_operand" "=r")
++ (match_operand:SI 1 "address_operand" ""))
++ (set (match_operand:SI 0 "register_operand" "")
++ (mem:SI (match_dup 2)))]
++ ""
++ {
++ operands[1] = XEXP(operands[1], 0);
++ }
++)
++
++
++;; These instructions are for loading constants which cannot be loaded
++;; directly from the constant pool because the offset is too large
++;; high and lo_sum are used even tough for our case it should be
++;; low and high sum :-)
++(define_insn "mov_symbol_lo"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
++ ""
++ "mov\t%0, lo(%1)"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")]
++)
++
++(define_insn "add_symbol_hi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (lo_sum:SI (match_dup 0)
++ (match_operand:SI 1 "immediate_operand" "i" )))]
++ ""
++ "orh\t%0, hi(%1)"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")]
++)
++
++
++
++;; When generating pic, we need to load the symbol offset into a register.
++;; So that the optimizer does not confuse this with a normal symbol load
++;; we use an unspec. The offset will be loaded from a constant pool entry,
++;; since that is the only type of relocation we can use.
++(define_insn "pic_load_addr"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
++ "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
++ "lddpc\t%0, %1"
++ [(set_attr "type" "load")
++ (set_attr "length" "4")]
++)
++
++(define_insn "pic_compute_got_from_pc"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (unspec:SI [(minus:SI (pc)
++ (match_dup 0))] UNSPEC_PIC_BASE))
++ (use (label_ref (match_operand 1 "" "")))]
++ "flag_pic"
++ {
++ (*targetm.asm_out.internal_label) (asm_out_file, "L",
++ CODE_LABEL_NUMBER (operands[1]));
++ return \"rsub\t%0, pc\";
++ }
++ [(set_attr "cc" "clobber")
++ (set_attr "length" "2")]
++)
++
++;;== long long int - 64 bits ==================================================
++
++(define_expand "movdi"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "")
++ (match_operand:DI 1 "general_operand" ""))]
++ ""
++ {
++
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) != REG)
++ operands[1] = force_reg (DImode, operands[1]);
++
++ })
++
++
++(define_insn_and_split "*movdi_internal"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m")
++ (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))]
++ "register_operand (operands[0], DImode)
++ || register_operand (operands[1], DImode)"
++ {
++ switch (which_alternative ){
++ case 0:
++ case 1:
++ case 2:
++ case 3:
++ case 4:
++ return "#";
++ case 5:
++ if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
++ return "ld.d\t%0, pc[%1 - .]";
++ else
++ return "ld.d\t%0, %1";
++ case 6:
++ return "st.d\t%0, %1";
++ default:
++ abort();
++ }
++ }
++;; Lets split all reg->reg or imm->reg transfers into two SImode transfers
++ "reload_completed &&
++ (REG_P (operands[0]) &&
++ (REG_P (operands[1])
++ || GET_CODE (operands[1]) == CONST_INT
++ || GET_CODE (operands[1]) == CONST_DOUBLE))"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))]
++ {
++ operands[2] = gen_highpart (SImode, operands[0]);
++ operands[0] = gen_lowpart (SImode, operands[0]);
++ if ( REG_P(operands[1]) ){
++ operands[3] = gen_highpart(SImode, operands[1]);
++ operands[1] = gen_lowpart(SImode, operands[1]);
++ } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
++ || GET_CODE(operands[1]) == CONST_INT ){
++ rtx split_const[2];
++ avr32_split_const_expr (DImode, SImode, operands[1], split_const);
++ operands[3] = split_const[1];
++ operands[1] = split_const[0];
++ } else {
++ internal_error("Illegal operand[1] for movdi split!");
++ }
++ }
++
++ [(set_attr "length" "*,*,*,*,*,4,4")
++ (set_attr "type" "*,*,*,*,*,load2,store2")
++ (set_attr "cc" "*,*,*,*,*,none,none")])
++
++
++;;== 128 bits ==================================================
++(define_expand "movti"
++ [(set (match_operand:TI 0 "nonimmediate_operand" "")
++ (match_operand:TI 1 "nonimmediate_operand" ""))]
++ "TARGET_ARCH_AP"
++ {
++
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) != REG)
++ operands[1] = force_reg (TImode, operands[1]);
++
++ /* We must fix any pre_dec for loads and post_inc stores */
++ if ( GET_CODE (operands[0]) == MEM
++ && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
++ emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
++ emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
++ DONE;
++ }
++
++ if ( GET_CODE (operands[1]) == MEM
++ && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
++ emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
++ emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
++ DONE;
++ }
++ })
++
++
++(define_insn_and_split "*movti_internal"
++ [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r")
++ (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))]
++ "(register_operand (operands[0], TImode)
++ || register_operand (operands[1], TImode))"
++ {
++ switch (which_alternative ){
++ case 0:
++ case 2:
++ case 4:
++ return "#";
++ case 1:
++ return "ldm\t%p1, %0";
++ case 3:
++ return "stm\t%p0, %1";
++ case 5:
++ return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]";
++ }
++ }
++
++ "reload_completed &&
++ (REG_P (operands[0]) &&
++ (REG_P (operands[1])
++ /* If this is a load from the constant pool we split it into
++ two double loads. */
++ || (GET_CODE (operands[1]) == MEM
++ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
++ && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
++ /* If this is a load where the pointer register is a part
++ of the register list, we must split it into two double
++ loads in order for it to be exception safe. */
++ || (GET_CODE (operands[1]) == MEM
++ && register_operand (XEXP (operands[1], 0), SImode)
++ && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
++ || GET_CODE (operands[1]) == CONST_INT
++ || GET_CODE (operands[1]) == CONST_DOUBLE))"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))]
++ {
++ operands[2] = simplify_gen_subreg ( DImode, operands[0],
++ TImode, 0 );
++ operands[0] = simplify_gen_subreg ( DImode, operands[0],
++ TImode, 8 );
++ if ( REG_P(operands[1]) ){
++ operands[3] = simplify_gen_subreg ( DImode, operands[1],
++ TImode, 0 );
++ operands[1] = simplify_gen_subreg ( DImode, operands[1],
++ TImode, 8 );
++ } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
++ || GET_CODE(operands[1]) == CONST_INT ){
++ rtx split_const[2];
++ avr32_split_const_expr (TImode, DImode, operands[1], split_const);
++ operands[3] = split_const[1];
++ operands[1] = split_const[0];
++ } else if (avr32_const_pool_ref_operand (operands[1], GET_MODE(operands[1]))){
++ rtx split_const[2];
++ rtx cop = avoid_constant_pool_reference (operands[1]);
++ if (operands[1] == cop)
++ cop = get_pool_constant (XEXP (operands[1], 0));
++ avr32_split_const_expr (TImode, DImode, cop, split_const);
++ operands[3] = force_const_mem (DImode, split_const[1]);
++ operands[1] = force_const_mem (DImode, split_const[0]);
++ } else {
++ rtx ptr_reg = XEXP (operands[1], 0);
++ operands[1] = gen_rtx_MEM (DImode,
++ gen_rtx_PLUS ( SImode,
++ ptr_reg,
++ GEN_INT (8) ));
++ operands[3] = gen_rtx_MEM (DImode,
++ ptr_reg);
++
++ /* Check if the first load will clobber the pointer.
++ If so, we must switch the order of the operations. */
++ if ( reg_overlap_mentioned_p (operands[0], ptr_reg) )
++ {
++ /* We need to switch the order of the operations
++ so that the pointer register does not get clobbered
++ after the first double word load. */
++ rtx tmp;
++ tmp = operands[0];
++ operands[0] = operands[2];
++ operands[2] = tmp;
++ tmp = operands[1];
++ operands[1] = operands[3];
++ operands[3] = tmp;
++ }
++
++
++ }
++ }
++ [(set_attr "length" "*,*,4,4,*,8")
++ (set_attr "type" "*,*,load4,store4,*,load4")])
++
++
++;;== float - 32 bits ==========================================================
++(define_expand "movsf"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "")
++ (match_operand:SF 1 "general_operand" ""))]
++ ""
++ {
++
++
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) != REG)
++ operands[1] = force_reg (SFmode, operands[1]);
++
++ })
++
++(define_insn "*movsf_internal"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m")
++ (match_operand:SF 1 "general_operand" "r, G,F,m,r"))]
++ "(register_operand (operands[0], SFmode)
++ || register_operand (operands[1], SFmode))"
++ {
++ switch (which_alternative) {
++ case 0:
++ case 1: return "mov\t%0, %1";
++ case 2:
++ {
++ HOST_WIDE_INT target_float[2];
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
++ if ( TARGET_V2_INSNS
++ && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) )
++ return "movh\t%0, hi(%1)";
++ else
++ return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
++ }
++ case 3:
++ if ( (REG_P(XEXP(operands[1], 0))
++ && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
++ || (GET_CODE(XEXP(operands[1], 0)) == PLUS
++ && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
++ && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
++ && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
++ return "lddsp\t%0, %1";
++ else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
++ return "lddpc\t%0, %1";
++ else
++ return "ld.w\t%0, %1";
++ case 4:
++ if ( (REG_P(XEXP(operands[0], 0))
++ && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
++ || (GET_CODE(XEXP(operands[0], 0)) == PLUS
++ && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
++ && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
++ && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
++ return "stdsp\t%0, %1";
++ else
++ return "st.w\t%0, %1";
++ default:
++ abort();
++ }
++ }
++
++ [(set_attr "length" "2,4,8,4,4")
++ (set_attr "type" "alu,alu,alu2,load,store")
++ (set_attr "cc" "none,none,clobber,none,none")])
++
++
++
++;;== double - 64 bits =========================================================
++(define_expand "movdf"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "")
++ (match_operand:DF 1 "general_operand" ""))]
++ ""
++ {
++ /* One of the ops has to be in a register. */
++ if (GET_CODE (operands[0]) != REG){
++ operands[1] = force_reg (DFmode, operands[1]);
++ }
++ })
++
++
++(define_insn_and_split "*movdf_internal"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m")
++ (match_operand:DF 1 "general_operand" " r,G,F,m,r"))]
++ "(register_operand (operands[0], DFmode)
++ || register_operand (operands[1], DFmode))"
++ {
++ switch (which_alternative ){
++ case 0:
++ case 1:
++ case 2:
++ return "#";
++ case 3:
++ if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
++ return "ld.d\t%0, pc[%1 - .]";
++ else
++ return "ld.d\t%0, %1";
++ case 4:
++ return "st.d\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ "reload_completed
++ && (REG_P (operands[0])
++ && (REG_P (operands[1])
++ || GET_CODE (operands[1]) == CONST_DOUBLE))"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))]
++ "
++ {
++ operands[2] = gen_highpart (SImode, operands[0]);
++ operands[0] = gen_lowpart (SImode, operands[0]);
++ operands[3] = gen_highpart(SImode, operands[1]);
++ operands[1] = gen_lowpart(SImode, operands[1]);
++ }
++ "
++
++ [(set_attr "length" "*,*,*,4,4")
++ (set_attr "type" "*,*,*,load2,store2")
++ (set_attr "cc" "*,*,*,none,none")])
++
++
++;;=============================================================================
++;; Conditional Moves
++;;=============================================================================
++(define_insn "ld<mode>_predicable"
++ [(set (match_operand:MOVCC 0 "register_operand" "=r")
++ (match_operand:MOVCC 1 "avr32_non_rmw_memory_operand" "<MOVCC:pred_mem_constraint>"))]
++ "TARGET_V2_INSNS"
++ "ld<MOVCC:load_postfix>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++
++(define_insn "st<mode>_predicable"
++ [(set (match_operand:MOVCC 0 "avr32_non_rmw_memory_operand" "=<MOVCC:pred_mem_constraint>")
++ (match_operand:MOVCC 1 "register_operand" "r"))]
++ "TARGET_V2_INSNS"
++ "st<MOVCC:store_postfix>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "store")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "mov<mode>_predicable"
++ [(set (match_operand:MOVCC 0 "register_operand" "=r")
++ (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))]
++ ""
++ "mov%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "alu")
++ (set_attr "predicable" "yes")]
++)
++
++
++;;=============================================================================
++;; Move chunks of memory
++;;=============================================================================
++
++(define_expand "movmemsi"
++ [(match_operand:BLK 0 "general_operand" "")
++ (match_operand:BLK 1 "general_operand" "")
++ (match_operand:SI 2 "const_int_operand" "")
++ (match_operand:SI 3 "const_int_operand" "")]
++ ""
++ "
++ if (avr32_gen_movmemsi (operands))
++ DONE;
++ FAIL;
++ "
++ )
++
++
++
++
++;;=============================================================================
++;; Bit field instructions
++;;-----------------------------------------------------------------------------
++;; Instructions to insert or extract bit-fields
++;;=============================================================================
++
++(define_insn "insv"
++ [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
++ (match_operand:SI 1 "immediate_operand" "Ku05")
++ (match_operand:SI 2 "immediate_operand" "Ku05"))
++ (match_operand 3 "register_operand" "r"))]
++ ""
++ "bfins\t%0, %3, %2, %1"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "set_ncz")])
++
++
++
++(define_expand "extv"
++ [ (set (match_operand:SI 0 "register_operand" "")
++ (sign_extract:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")
++ (match_operand:SI 3 "immediate_operand" "")))]
++ ""
++ {
++ if ( INTVAL(operands[2]) >= 32 )
++ FAIL;
++ }
++)
++
++(define_expand "extzv"
++ [ (set (match_operand:SI 0 "register_operand" "")
++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")
++ (match_operand:SI 3 "immediate_operand" "")))]
++ ""
++ {
++ if ( INTVAL(operands[2]) >= 32 )
++ FAIL;
++ }
++)
++
++(define_insn "extv_internal"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "Ku05")
++ (match_operand:SI 3 "immediate_operand" "Ku05")))]
++ "INTVAL(operands[2]) < 32"
++ "bfexts\t%0, %1, %3, %2"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "set_ncz")])
++
++
++(define_insn "extzv_internal"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "Ku05")
++ (match_operand:SI 3 "immediate_operand" "Ku05")))]
++ "INTVAL(operands[2]) < 32"
++ "bfextu\t%0, %1, %3, %2"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "set_ncz")])
++
++
++
++;;=============================================================================
++;; Some peepholes for avoiding unnecessary cast instructions
++;; followed by bfins.
++;;-----------------------------------------------------------------------------
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
++ (set (zero_extract:SI (match_operand 2 "register_operand" "")
++ (match_operand:SI 3 "immediate_operand" "")
++ (match_operand:SI 4 "immediate_operand" ""))
++ (match_dup 0))]
++ "((peep2_reg_dead_p(2, operands[0]) &&
++ (INTVAL(operands[3]) <= 8)))"
++ [(set (zero_extract:SI (match_dup 2)
++ (match_dup 3)
++ (match_dup 4))
++ (match_dup 1))]
++ )
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
++ (set (zero_extract:SI (match_operand 2 "register_operand" "")
++ (match_operand:SI 3 "immediate_operand" "")
++ (match_operand:SI 4 "immediate_operand" ""))
++ (match_dup 0))]
++ "((peep2_reg_dead_p(2, operands[0]) &&
++ (INTVAL(operands[3]) <= 16)))"
++ [(set (zero_extract:SI (match_dup 2)
++ (match_dup 3)
++ (match_dup 4))
++ (match_dup 1))]
++ )
++
++;;=============================================================================
++;; push bytes
++;;-----------------------------------------------------------------------------
++;; Implements the push instruction
++;;=============================================================================
++(define_insn "pushm"
++ [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
++ (unspec:BLK [(match_operand 0 "const_int_operand" "")]
++ UNSPEC_PUSHM))]
++ ""
++ {
++ if (INTVAL(operands[0])) {
++ return "pushm\t%r0";
++ } else {
++ return "";
++ }
++ }
++ [(set_attr "type" "store")
++ (set_attr "length" "2")
++ (set_attr "cc" "none")])
++
++(define_insn "stm"
++ [(unspec [(match_operand 0 "register_operand" "r")
++ (match_operand 1 "const_int_operand" "")
++ (match_operand 2 "const_int_operand" "")]
++ UNSPEC_STM)]
++ ""
++ {
++ if (INTVAL(operands[1])) {
++ if (INTVAL(operands[2]) != 0)
++ return "stm\t--%0, %s1";
++ else
++ return "stm\t%0, %s1";
++ } else {
++ return "";
++ }
++ }
++ [(set_attr "type" "store")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++
++
++(define_insn "popm"
++ [(unspec [(match_operand 0 "const_int_operand" "")]
++ UNSPEC_POPM)]
++ ""
++ {
++ if (INTVAL(operands[0])) {
++ return "popm %r0";
++ } else {
++ return "";
++ }
++ }
++ [(set_attr "type" "load")
++ (set_attr "length" "2")])
++
++
++
++;;=============================================================================
++;; add
++;;-----------------------------------------------------------------------------
++;; Add reg1 and reg2 and put the result in reg0.
++;;=============================================================================
++(define_insn "add<mode>3"
++ [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
++ (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
++ (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
++ ""
++ "@
++ add %0, %2
++ add %0, %1, %2
++ sub %0, %n2
++ sub %0, %1, %n2
++ sub %0, %n2"
++
++ [(set_attr "length" "2,4,2,4,4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++(define_insn "add<mode>3_lsl"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (plus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
++ (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))
++ (match_operand:INTM 2 "register_operand" "r")))]
++ ""
++ "add %0, %2, %1 << %3"
++ [(set_attr "length" "4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++(define_insn "add<mode>3_lsl2"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (plus:INTM (match_operand:INTM 1 "register_operand" "r")
++ (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
++ (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
++ ""
++ "add %0, %1, %2 << %3"
++ [(set_attr "length" "4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++
++(define_insn "add<mode>3_mul"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r")
++ (match_operand:INTM 3 "immediate_operand" "Ku04" ))
++ (match_operand:INTM 2 "register_operand" "r")))]
++ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
++ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
++ "add %0, %2, %1 << %p3"
++ [(set_attr "length" "4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++(define_insn "add<mode>3_mul2"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (plus:INTM (match_operand:INTM 1 "register_operand" "r")
++ (mult:INTM (match_operand:INTM 2 "register_operand" "r")
++ (match_operand:INTM 3 "immediate_operand" "Ku04" ))))]
++ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
++ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
++ "add %0, %1, %2 << %p3"
++ [(set_attr "length" "4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (ashift:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (plus:SI (match_dup 0)
++ (match_operand:SI 4 "register_operand" "")))]
++ "(peep2_reg_dead_p(2, operands[0]) &&
++ (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
++ [(set (match_dup 3)
++ (plus:SI (ashift:SI (match_dup 1)
++ (match_dup 2))
++ (match_dup 4)))]
++ )
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (ashift:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (plus:SI (match_operand:SI 4 "register_operand" "")
++ (match_dup 0)))]
++ "(peep2_reg_dead_p(2, operands[0]) &&
++ (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
++ [(set (match_dup 3)
++ (plus:SI (ashift:SI (match_dup 1)
++ (match_dup 2))
++ (match_dup 4)))]
++ )
++
++(define_insn "adddi3"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (plus:DI (match_operand:DI 1 "register_operand" "%0,r")
++ (match_operand:DI 2 "register_operand" "r,r")))]
++ ""
++ "@
++ add %0, %2\;adc %m0, %m0, %m2
++ add %0, %1, %2\;adc %m0, %m1, %m2"
++ [(set_attr "length" "6,8")
++ (set_attr "type" "alu2")
++ (set_attr "cc" "set_vncz")])
++
++
++(define_insn "add<mode>_imm_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "+r")
++ (plus:INTM (match_dup 0)
++ (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))]
++ ""
++ "sub%?\t%0, -%1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++)
++
++;;=============================================================================
++;; subtract
++;;-----------------------------------------------------------------------------
++;; Subtract reg2 or an immediate value from reg0 and put the result in reg0.
++;;=============================================================================
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
++ (minus:INTM (match_operand:INTM 1 "register_const_int_operand" "0,r,0,r,0,r,Ks08")
++ (match_operand:INTM 2 "register_const_int_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
++ ""
++ "@
++ sub %0, %2
++ sub %0, %1, %2
++ sub %0, %2
++ sub %0, %1, %2
++ sub %0, %2
++ rsub %0, %1
++ rsub %0, %2, %1"
++ [(set_attr "length" "2,4,2,4,4,2,4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++(define_insn "*sub<mode>3_mul"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (minus:INTM (match_operand:INTM 1 "register_operand" "r")
++ (mult:INTM (match_operand:INTM 2 "register_operand" "r")
++ (match_operand:SI 3 "immediate_operand" "Ku04" ))))]
++ "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
++ (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
++ "sub %0, %1, %2 << %p3"
++ [(set_attr "length" "4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++(define_insn "*sub<mode>3_lsl"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (minus:INTM (match_operand:INTM 1 "register_operand" "r")
++ (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
++ (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
++ ""
++ "sub %0, %1, %2 << %3"
++ [(set_attr "length" "4")
++ (set_attr "cc" "<INTM:alu_cc_attr>")])
++
++
++(define_insn "subdi3"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (minus:DI (match_operand:DI 1 "register_operand" "%0,r")
++ (match_operand:DI 2 "register_operand" "r,r")))]
++ ""
++ "@
++ sub %0, %2\;sbc %m0, %m0, %m2
++ sub %0, %1, %2\;sbc %m0, %m1, %m2"
++ [(set_attr "length" "6,8")
++ (set_attr "type" "alu2")
++ (set_attr "cc" "set_vncz")])
++
++
++(define_insn "sub<mode>_imm_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "+r")
++ (minus:INTM (match_dup 0)
++ (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))]
++ ""
++ "sub%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
++(define_insn "rsub<mode>_imm_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "+r")
++ (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")
++ (match_dup 0)))]
++ ""
++ "rsub%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
++;;=============================================================================
++;; multiply
++;;-----------------------------------------------------------------------------
++;; Multiply op1 and op2 and put the value in op0.
++;;=============================================================================
++
++
++(define_insn "mulqi3"
++ [(set (match_operand:QI 0 "register_operand" "=r,r,r")
++ (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
++ (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
++ "!TARGET_NO_MUL_INSNS"
++ {
++ switch (which_alternative){
++ case 0:
++ return "mul %0, %2";
++ case 1:
++ return "mul %0, %1, %2";
++ case 2:
++ return "mul %0, %1, %2";
++ default:
++ gcc_unreachable();
++ }
++ }
++ [(set_attr "type" "mulww_w,mulww_w,mulwh")
++ (set_attr "length" "2,4,4")
++ (set_attr "cc" "none")])
++
++(define_insn "mulsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
++ (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
++ (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
++ "!TARGET_NO_MUL_INSNS"
++ {
++ switch (which_alternative){
++ case 0:
++ return "mul %0, %2";
++ case 1:
++ return "mul %0, %1, %2";
++ case 2:
++ return "mul %0, %1, %2";
++ default:
++ gcc_unreachable();
++ }
++ }
++ [(set_attr "type" "mulww_w,mulww_w,mulwh")
++ (set_attr "length" "2,4,4")
++ (set_attr "cc" "none")])
++
++
++(define_insn "mulhisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI
++ (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulhh.w %0, %1:b, %2:b"
++ [(set_attr "type" "mulhh")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++(define_peephole2
++ [(match_scratch:DI 6 "r")
++ (set (match_operand:SI 0 "register_operand" "")
++ (mult:SI
++ (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
++ (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
++ (set (match_operand:SI 3 "register_operand" "")
++ (ashiftrt:SI (match_dup 0)
++ (const_int 16)))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP
++ && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
++ [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
++ (set (match_dup 6)
++ (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
++ (sign_extend:DI (match_dup 2)))
++ (const_int 16)))
++ (set (match_dup 3) (match_dup 5))]
++
++ "{
++ operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
++ operands[5] = gen_highpart (SImode, operands[4]);
++ }"
++ )
++
++(define_insn "mulnhisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI
++ (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulnhh.w %0, %1:b, %2:b"
++ [(set_attr "type" "mulhh")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++(define_insn "machisi3"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (plus:SI (mult:SI
++ (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
++ (match_dup 0)))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "machh.w %0, %1:b, %2:b"
++ [(set_attr "type" "machh_w")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++
++
++(define_insn "mulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI
++ (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
++ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
++ "!TARGET_NO_MUL_INSNS"
++ "muls.d %0, %1, %2"
++ [(set_attr "type" "mulww_d")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++(define_insn "umulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI
++ (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
++ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
++ "!TARGET_NO_MUL_INSNS"
++ "mulu.d %0, %1, %2"
++ [(set_attr "type" "mulww_d")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++(define_insn "*mulaccsi3"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
++ (match_operand:SI 2 "register_operand" "r"))
++ (match_dup 0)))]
++ "!TARGET_NO_MUL_INSNS"
++ "mac %0, %1, %2"
++ [(set_attr "type" "macww_w")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++(define_insn "*mulaccsidi3"
++ [(set (match_operand:DI 0 "register_operand" "+r")
++ (plus:DI (mult:DI
++ (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
++ (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
++ (match_dup 0)))]
++ "!TARGET_NO_MUL_INSNS"
++ "macs.d %0, %1, %2"
++ [(set_attr "type" "macww_d")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++(define_insn "*umulaccsidi3"
++ [(set (match_operand:DI 0 "register_operand" "+r")
++ (plus:DI (mult:DI
++ (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
++ (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
++ (match_dup 0)))]
++ "!TARGET_NO_MUL_INSNS"
++ "macu.d %0, %1, %2"
++ [(set_attr "type" "macww_d")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++
++
++;; Try to avoid Write-After-Write hazards for mul operations
++;; if it can be done
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (mult:SI
++ (sign_extend:SI (match_operand 1 "general_operand" ""))
++ (sign_extend:SI (match_operand 2 "general_operand" ""))))
++ (set (match_dup 0)
++ (match_operator:SI 3 "alu_operator" [(match_dup 0)
++ (match_operand 4 "general_operand" "")]))]
++ "peep2_reg_dead_p(1, operands[2])"
++ [(set (match_dup 5)
++ (mult:SI
++ (sign_extend:SI (match_dup 1))
++ (sign_extend:SI (match_dup 2))))
++ (set (match_dup 0)
++ (match_op_dup 3 [(match_dup 5)
++ (match_dup 4)]))]
++ "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
++ )
++
++
++
++;;=============================================================================
++;; DSP instructions
++;;=============================================================================
++(define_insn "mulsathh_h"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 15))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulsathh.h\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulhh")])
++
++(define_insn "mulsatrndhh_h"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (ss_truncate:HI (ashiftrt:SI
++ (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 1073741824))
++ (const_int 15))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulsatrndhh.h\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulhh")])
++
++(define_insn "mulsathh_w"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 1))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulsathh.w\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulhh")])
++
++(define_insn "mulsatwh_w"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 15))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulsatwh.w\t%0, %1, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulwh")])
++
++(define_insn "mulsatrndwh_w"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 1073741824))
++ (const_int 15))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulsatrndwh.w\t%0, %1, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulwh")])
++
++(define_insn "macsathh_w"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (plus:SI (match_dup 0)
++ (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 1)))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "macsathh.w\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulhh")])
++
++
++(define_insn "mulwh_d"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 16)))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulwh.d\t%0, %1, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulwh")])
++
++
++(define_insn "mulnwh_d"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 16)))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "mulnwh.d\t%0, %1, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulwh")])
++
++(define_insn "macwh_d"
++ [(set (match_operand:DI 0 "register_operand" "+r")
++ (plus:DI (match_dup 0)
++ (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
++ (const_int 16))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "macwh.d\t%0, %1, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulwh")])
++
++(define_insn "machh_d"
++ [(set (match_operand:DI 0 "register_operand" "+r")
++ (plus:DI (match_dup 0)
++ (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
++ (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
++ "!TARGET_NO_MUL_INSNS && TARGET_DSP"
++ "machh.d\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "mulwh")])
++
++(define_insn "satadd_w"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_DSP"
++ "satadd.w\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "alu_sat")])
++
++(define_insn "satsub_w"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_DSP"
++ "satsub.w\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "alu_sat")])
++
++(define_insn "satadd_h"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
++ (match_operand:HI 2 "register_operand" "r")))]
++ "TARGET_DSP"
++ "satadd.h\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "alu_sat")])
++
++(define_insn "satsub_h"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
++ (match_operand:HI 2 "register_operand" "r")))]
++ "TARGET_DSP"
++ "satsub.h\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")
++ (set_attr "type" "alu_sat")])
++
++
++;;=============================================================================
++;; smin
++;;-----------------------------------------------------------------------------
++;; Set reg0 to the smallest value of reg1 and reg2. It is used for signed
++;; values in the registers.
++;;=============================================================================
++(define_insn "sminsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (smin:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ ""
++ "min %0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++;;=============================================================================
++;; smax
++;;-----------------------------------------------------------------------------
++;; Set reg0 to the largest value of reg1 and reg2. It is used for signed
++;; values in the registers.
++;;=============================================================================
++(define_insn "smaxsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (smax:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ ""
++ "max %0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++
++
++;;=============================================================================
++;; Logical operations
++;;-----------------------------------------------------------------------------
++
++
++;; Split up simple DImode logical operations. Simply perform the logical
++;; operation on the upper and lower halves of the registers.
++(define_split
++ [(set (match_operand:DI 0 "register_operand" "")
++ (match_operator:DI 6 "logical_binary_operator"
++ [(match_operand:DI 1 "register_operand" "")
++ (match_operand:DI 2 "register_operand" "")]))]
++ "reload_completed"
++ [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
++ (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
++ "
++ {
++ operands[3] = gen_highpart (SImode, operands[0]);
++ operands[0] = gen_lowpart (SImode, operands[0]);
++ operands[4] = gen_highpart (SImode, operands[1]);
++ operands[1] = gen_lowpart (SImode, operands[1]);
++ operands[5] = gen_highpart (SImode, operands[2]);
++ operands[2] = gen_lowpart (SImode, operands[2]);
++ }"
++)
++
++;;=============================================================================
++;; Logical operations with shifted operand
++;;=============================================================================
++(define_insn "<code>si_lshift"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (logical:SI (match_operator:SI 4 "logical_shift_operator"
++ [(match_operand:SI 2 "register_operand" "r")
++ (match_operand:SI 3 "immediate_operand" "Ku05")])
++ (match_operand:SI 1 "register_operand" "r")))]
++ ""
++ {
++ if ( GET_CODE(operands[4]) == ASHIFT )
++ return "<logical_insn>\t%0, %1, %2 << %3";
++ else
++ return "<logical_insn>\t%0, %1, %2 >> %3";
++ }
++
++ [(set_attr "cc" "set_z")]
++)
++
++
++;;************************************************
++;; Peepholes for detecting logical operations
++;; with shifted operands
++;;************************************************
++
++(define_peephole
++ [(set (match_operand:SI 3 "register_operand" "")
++ (match_operator:SI 5 "logical_shift_operator"
++ [(match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")]))
++ (set (match_operand:SI 0 "register_operand" "")
++ (logical:SI (match_operand:SI 4 "register_operand" "")
++ (match_dup 3)))]
++ "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
++ {
++ if ( GET_CODE(operands[5]) == ASHIFT )
++ return "<logical_insn>\t%0, %4, %1 << %2";
++ else
++ return "<logical_insn>\t%0, %4, %1 >> %2";
++ }
++ [(set_attr "cc" "set_z")]
++ )
++
++(define_peephole
++ [(set (match_operand:SI 3 "register_operand" "")
++ (match_operator:SI 5 "logical_shift_operator"
++ [(match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")]))
++ (set (match_operand:SI 0 "register_operand" "")
++ (logical:SI (match_dup 3)
++ (match_operand:SI 4 "register_operand" "")))]
++ "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
++ {
++ if ( GET_CODE(operands[5]) == ASHIFT )
++ return "<logical_insn>\t%0, %4, %1 << %2";
++ else
++ return "<logical_insn>\t%0, %4, %1 >> %2";
++ }
++ [(set_attr "cc" "set_z")]
++ )
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (match_operator:SI 5 "logical_shift_operator"
++ [(match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")]))
++ (set (match_operand:SI 3 "register_operand" "")
++ (logical:SI (match_operand:SI 4 "register_operand" "")
++ (match_dup 0)))]
++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
++
++ [(set (match_dup 3)
++ (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
++ (match_dup 4)))]
++
++ ""
++)
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (match_operator:SI 5 "logical_shift_operator"
++ [(match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")]))
++ (set (match_operand:SI 3 "register_operand" "")
++ (logical:SI (match_dup 0)
++ (match_operand:SI 4 "register_operand" "")))]
++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
++
++ [(set (match_dup 3)
++ (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
++ (match_dup 4)))]
++
++ ""
++)
++
++
++;;=============================================================================
++;; and
++;;-----------------------------------------------------------------------------
++;; Store the result after a bitwise logical-and between reg0 and reg2 in reg0.
++;;=============================================================================
++
++(define_insn "andnsi"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (and:SI (match_dup 0)
++ (not:SI (match_operand:SI 1 "register_operand" "r"))))]
++ ""
++ "andn %0, %1"
++ [(set_attr "cc" "set_z")
++ (set_attr "length" "2")]
++)
++
++
++(define_insn "andsi3"
++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r,r, r, r,r,r,r,r")
++ (and:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,r,0,0, 0, 0,0,0,0,r" )
++ (match_operand:SI 2 "nonmemory_operand" " N,M,N,Ku16,Ks17,J,L,r,i,r")))]
++ ""
++ "@
++ memc\t%0, %z2
++ bfextu\t%0, %1, 0, %z2
++ cbr\t%0, %z2
++ andl\t%0, %2, COH
++ andl\t%0, lo(%2)
++ andh\t%0, hi(%2), COH
++ andh\t%0, hi(%2)
++ and\t%0, %2
++ andh\t%0, hi(%2)\;andl\t%0, lo(%2)
++ and\t%0, %1, %2"
++
++ [(set_attr "length" "4,4,2,4,4,4,4,2,8,4")
++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z")])
++
++
++
++(define_insn "anddi3"
++ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
++ (and:DI (match_operand:DI 1 "register_operand" "%0,r")
++ (match_operand:DI 2 "register_operand" "r,r")))]
++ ""
++ "#"
++ [(set_attr "length" "8")
++ (set_attr "cc" "clobber")]
++)
++
++;;=============================================================================
++;; or
++;;-----------------------------------------------------------------------------
++;; Store the result after a bitwise inclusive-or between reg0 and reg2 in reg0.
++;;=============================================================================
++
++(define_insn "iorsi3"
++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r, r,r,r,r")
++ (ior:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0,0, 0,0,0,r" )
++ (match_operand:SI 2 "nonmemory_operand" " O,O,Ku16,J,r,i,r")))]
++ ""
++ "@
++ mems\t%0, %p2
++ sbr\t%0, %p2
++ orl\t%0, %2
++ orh\t%0, hi(%2)
++ or\t%0, %2
++ orh\t%0, hi(%2)\;orl\t%0, lo(%2)
++ or\t%0, %1, %2"
++
++ [(set_attr "length" "4,2,4,4,2,8,4")
++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z")])
++
++
++(define_insn "iordi3"
++ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
++ (ior:DI (match_operand:DI 1 "register_operand" "%0,r")
++ (match_operand:DI 2 "register_operand" "r,r")))]
++ ""
++ "#"
++ [(set_attr "length" "8")
++ (set_attr "cc" "clobber")]
++)
++
++;;=============================================================================
++;; xor bytes
++;;-----------------------------------------------------------------------------
++;; Store the result after a bitwise exclusive-or between reg0 and reg2 in reg0.
++;;=============================================================================
++
++(define_insn "xorsi3"
++ [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r, r,r,r,r")
++ (xor:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0, 0,0,0,r" )
++ (match_operand:SI 2 "nonmemory_operand" " O,Ku16,J,r,i,r")))]
++ ""
++ "@
++ memt\t%0, %p2
++ eorl\t%0, %2
++ eorh\t%0, hi(%2)
++ eor\t%0, %2
++ eorh\t%0, hi(%2)\;eorl\t%0, lo(%2)
++ eor\t%0, %1, %2"
++
++ [(set_attr "length" "4,4,4,2,8,4")
++ (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z")])
++
++(define_insn "xordi3"
++ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
++ (xor:DI (match_operand:DI 1 "register_operand" "%0,r")
++ (match_operand:DI 2 "register_operand" "r,r")))]
++ ""
++ "#"
++ [(set_attr "length" "8")
++ (set_attr "cc" "clobber")]
++)
++
++;;=============================================================================
++;; Three operand predicable insns
++;;=============================================================================
++
++(define_insn "<predicable_insn3><mode>_predicable"
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
++ (match_operand:INTM 2 "register_operand" "r")))]
++ "TARGET_V2_INSNS"
++ "<predicable_insn3>%?\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable"
++ [(parallel
++ [(set (match_operand:INTM 0 "register_operand" "=r")
++ (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
++ (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21")))
++ (clobber (match_operand:INTM 3 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
++ return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
++ return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else
++ return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
++ }
++ else
++ {
++ if ( !avr32_cond_imm_clobber_splittable (insn, operands) )
++ {
++ if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
++ return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
++ return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
++ else
++ return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
++ }
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload
++ && avr32_cond_imm_clobber_splittable (insn, operands))"
++ [(set (match_dup 0)
++ (predicable_op3:INTM (match_dup 1)
++ (match_dup 2)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++
++
++;;=============================================================================
++;; Zero extend predicable insns
++;;=============================================================================
++(define_insn_and_split "zero_extendhisi_clobber_predicable"
++ [(parallel
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))
++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2";
++ }
++ else
++ {
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload)"
++ [(set (match_dup 0)
++ (zero_extend:SI (match_dup 1)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++
++(define_insn_and_split "zero_extendqisi_clobber_predicable"
++ [(parallel
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))
++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
++ }
++ else
++ {
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload)"
++ [(set (match_dup 0)
++ (zero_extend:SI (match_dup 1)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++
++(define_insn_and_split "zero_extendqihi_clobber_predicable"
++ [(parallel
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))
++ (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
++ "TARGET_V2_INSNS"
++ {
++ if ( current_insn_predicate != NULL_RTX )
++ {
++ return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
++ }
++ else
++ {
++ return "#";
++ }
++
++ }
++ ;; If we find out that we could not actually do if-conversion on the block
++ ;; containing this insn we convert it back to normal immediate format
++ ;; to avoid outputing a redundant move insn
++ ;; Do not split until after we have checked if we can make the insn
++ ;; conditional.
++ "(GET_CODE (PATTERN (insn)) != COND_EXEC
++ && cfun->machine->ifcvt_after_reload)"
++ [(set (match_dup 0)
++ (zero_extend:HI (match_dup 1)))]
++ ""
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")]
++ )
++;;=============================================================================
++;; divmod
++;;-----------------------------------------------------------------------------
++;; Signed division that produces both a quotient and a remainder.
++;;=============================================================================
++
++(define_expand "divmodsi4"
++ [(parallel [
++ (parallel [
++ (set (match_operand:SI 0 "register_operand" "=r")
++ (div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))
++ (set (match_operand:SI 3 "register_operand" "=r")
++ (mod:SI (match_dup 1)
++ (match_dup 2)))])
++ (use (match_dup 4))])]
++ ""
++ {
++ if (can_create_pseudo_p ()) {
++ operands[4] = gen_reg_rtx (DImode);
++ emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
++ emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
++ emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
++ DONE;
++ } else {
++ FAIL;
++ }
++ })
++
++
++(define_insn "divmodsi4_internal"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unspec:DI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")]
++ UNSPEC_DIVMODSI4_INTERNAL))]
++ ""
++ "divs %0, %1, %2"
++ [(set_attr "type" "div")
++ (set_attr "cc" "none")])
++
++
++;;=============================================================================
++;; udivmod
++;;-----------------------------------------------------------------------------
++;; Unsigned division that produces both a quotient and a remainder.
++;;=============================================================================
++(define_expand "udivmodsi4"
++ [(parallel [
++ (parallel [
++ (set (match_operand:SI 0 "register_operand" "=r")
++ (udiv:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))
++ (set (match_operand:SI 3 "register_operand" "=r")
++ (umod:SI (match_dup 1)
++ (match_dup 2)))])
++ (use (match_dup 4))])]
++ ""
++ {
++ if (can_create_pseudo_p ()) {
++ operands[4] = gen_reg_rtx (DImode);
++
++ emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
++ emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
++ emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
++
++ DONE;
++ } else {
++ FAIL;
++ }
++ })
++
++(define_insn "udivmodsi4_internal"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unspec:DI [(match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")]
++ UNSPEC_UDIVMODSI4_INTERNAL))]
++ ""
++ "divu %0, %1, %2"
++ [(set_attr "type" "div")
++ (set_attr "cc" "none")])
++
++
++;;=============================================================================
++;; Arithmetic-shift left
++;;-----------------------------------------------------------------------------
++;; Arithmetic-shift reg0 left by reg2 or immediate value.
++;;=============================================================================
++
++(define_insn "ashlsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
++ (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
++ (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
++ ""
++ "@
++ lsl %0, %1, %2
++ lsl %0, %2
++ lsl %0, %1, %2"
++ [(set_attr "length" "4,2,4")
++ (set_attr "cc" "set_ncz")])
++
++;;=============================================================================
++;; Arithmetic-shift right
++;;-----------------------------------------------------------------------------
++;; Arithmetic-shift reg0 right by an immediate value.
++;;=============================================================================
++
++(define_insn "ashrsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
++ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
++ (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
++ ""
++ "@
++ asr %0, %1, %2
++ asr %0, %2
++ asr %0, %1, %2"
++ [(set_attr "length" "4,2,4")
++ (set_attr "cc" "set_ncz")])
++
++;;=============================================================================
++;; Logical shift right
++;;-----------------------------------------------------------------------------
++;; Logical shift reg0 right by an immediate value.
++;;=============================================================================
++
++(define_insn "lshrsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
++ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
++ (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
++ ""
++ "@
++ lsr %0, %1, %2
++ lsr %0, %2
++ lsr %0, %1, %2"
++ [(set_attr "length" "4,2,4")
++ (set_attr "cc" "set_ncz")])
++
++
++;;=============================================================================
++;; neg
++;;-----------------------------------------------------------------------------
++;; Negate operand 1 and store the result in operand 0.
++;;=============================================================================
++(define_insn "negsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
++ ""
++ "@
++ neg\t%0
++ rsub\t%0, %1, 0"
++ [(set_attr "length" "2,4")
++ (set_attr "cc" "set_vncz")])
++
++(define_insn "negsi2_predicable"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (neg:SI (match_dup 0)))]
++ "TARGET_V2_INSNS"
++ "rsub%?\t%0, 0"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
++;;=============================================================================
++;; abs
++;;-----------------------------------------------------------------------------
++;; Store the absolute value of operand 1 into operand 0.
++;;=============================================================================
++(define_insn "abssi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (abs:SI (match_operand:SI 1 "register_operand" "0")))]
++ ""
++ "abs\t%0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "set_z")])
++
++
++;;=============================================================================
++;; one_cmpl
++;;-----------------------------------------------------------------------------
++;; Store the bitwise-complement of operand 1 into operand 0.
++;;=============================================================================
++
++(define_insn "one_cmplsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (not:SI (match_operand:SI 1 "register_operand" "0,r")))]
++ ""
++ "@
++ com\t%0
++ rsub\t%0, %1, -1"
++ [(set_attr "length" "2,4")
++ (set_attr "cc" "set_z")])
++
++
++(define_insn "one_cmplsi2_predicable"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (not:SI (match_dup 0)))]
++ "TARGET_V2_INSNS"
++ "rsub%?\t%0, -1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "predicable" "yes")])
++
++
++;;=============================================================================
++;; Bit load
++;;-----------------------------------------------------------------------------
++;; Load a bit into Z and C flags
++;;=============================================================================
++(define_insn "bldsi"
++ [(set (cc0)
++ (and:SI (match_operand:SI 0 "register_operand" "r")
++ (match_operand:SI 1 "one_bit_set_operand" "i")))]
++ ""
++ "bld\t%0, %p1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "bld")]
++ )
++
++
++;;=============================================================================
++;; Compare
++;;-----------------------------------------------------------------------------
++;; Compare reg0 with reg1 or an immediate value.
++;;=============================================================================
++
++(define_expand "cmp<mode>"
++ [(set (cc0)
++ (compare:CMP
++ (match_operand:CMP 0 "register_operand" "")
++ (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))]
++ ""
++ "{
++ avr32_compare_op0 = operands[0];
++ avr32_compare_op1 = operands[1];
++ }"
++)
++
++(define_insn "cmp<mode>_internal"
++ [(set (cc0)
++ (compare:CMP
++ (match_operand:CMP 0 "register_operand" "r")
++ (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))]
++ ""
++ {
++switch(GET_MODE(operands[0]))
++ {
++ case QImode:
++ avr32_branch_type = CMP_QI;
++ break;
++ case HImode:
++ avr32_branch_type = CMP_HI;
++ break;
++ case SImode:
++ avr32_branch_type = CMP_SI;
++ break;
++ case DImode:
++ avr32_branch_type = CMP_DI;
++ break;
++ default:
++ abort();
++ }
++ /* Check if the next insn already will output a compare. */
++ if (!next_insn_emits_cmp (insn))
++ set_next_insn_cond(insn,
++ avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1]));
++ return "";
++ }
++ [(set_attr "length" "4")
++ (set_attr "cc" "compare")])
++
++(define_expand "cmpsf"
++ [(set (cc0)
++ (compare:SF
++ (match_operand:SF 0 "general_operand" "")
++ (match_operand:SF 1 "general_operand" "")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "{
++ if ( !REG_P(operands[0]) )
++ operands[0] = force_reg(SFmode, operands[0]);
++
++ if ( !REG_P(operands[1]) )
++ operands[1] = force_reg(SFmode, operands[1]);
++
++ avr32_compare_op0 = operands[0];
++ avr32_compare_op1 = operands[1];
++ emit_insn(gen_cmpsf_internal_uc3fp(operands[0], operands[1]));
++ DONE;
++ }"
++)
++
++;;;=============================================================================
++;; Test if zero
++;;-----------------------------------------------------------------------------
++;; Compare reg against zero and set the condition codes.
++;;=============================================================================
++
++
++(define_expand "tstsi"
++ [(set (cc0)
++ (match_operand:SI 0 "register_operand" ""))]
++ ""
++ {
++ avr32_compare_op0 = operands[0];
++ avr32_compare_op1 = const0_rtx;
++ }
++)
++
++(define_insn "tstsi_internal"
++ [(set (cc0)
++ (match_operand:SI 0 "register_operand" "r"))]
++ ""
++ {
++ /* Check if the next insn already will output a compare. */
++ if (!next_insn_emits_cmp (insn))
++ set_next_insn_cond(insn,
++ avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
++
++ return "";
++ }
++ [(set_attr "length" "2")
++ (set_attr "cc" "compare")])
++
++
++(define_expand "tstdi"
++ [(set (cc0)
++ (match_operand:DI 0 "register_operand" ""))]
++ ""
++ {
++ avr32_compare_op0 = operands[0];
++ avr32_compare_op1 = const0_rtx;
++ }
++)
++
++(define_insn "tstdi_internal"
++ [(set (cc0)
++ (match_operand:DI 0 "register_operand" "r"))]
++ ""
++ {
++ /* Check if the next insn already will output a compare. */
++ if (!next_insn_emits_cmp (insn))
++ set_next_insn_cond(insn,
++ avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
++ return "";
++ }
++ [(set_attr "length" "4")
++ (set_attr "type" "alu2")
++ (set_attr "cc" "compare")])
++
++
++
++;;=============================================================================
++;; Convert operands
++;;-----------------------------------------------------------------------------
++;;
++;;=============================================================================
++(define_insn "truncdisi2"
++ [(set (match_operand:SI 0 "general_operand" "")
++ (truncate:SI (match_operand:DI 1 "general_operand" "")))]
++ ""
++ "truncdisi2")
++
++;;=============================================================================
++;; Extend
++;;-----------------------------------------------------------------------------
++;;
++;;=============================================================================
++
++
++(define_insn "extendhisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
++ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ return "casts.h\t%0";
++ case 1:
++ return "bfexts\t%0, %1, 0, 16";
++ case 2:
++ case 3:
++ return "ld.sh\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "2,4,2,4")
++ (set_attr "cc" "set_ncz,set_ncz,none,none")
++ (set_attr "type" "alu,alu,load_rm,load_rm")])
++
++(define_insn "extendqisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
++ (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ return "casts.b\t%0";
++ case 1:
++ return "bfexts\t%0, %1, 0, 8";
++ case 2:
++ case 3:
++ return "ld.sb\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "2,4,2,4")
++ (set_attr "cc" "set_ncz,set_ncz,none,none")
++ (set_attr "type" "alu,alu,load_rm,load_rm")])
++
++(define_insn "extendqihi2"
++ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
++ (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ return "casts.b\t%0";
++ case 1:
++ return "bfexts\t%0, %1, 0, 8";
++ case 2:
++ case 3:
++ return "ld.sb\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "2,4,2,4")
++ (set_attr "cc" "set_ncz,set_ncz,none,none")
++ (set_attr "type" "alu,alu,load_rm,load_rm")])
++
++
++;;=============================================================================
++;; Zero-extend
++;;-----------------------------------------------------------------------------
++;;
++;;=============================================================================
++
++(define_insn "zero_extendhisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
++ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ return "castu.h\t%0";
++ case 1:
++ return "bfextu\t%0, %1, 0, 16";
++ case 2:
++ case 3:
++ return "ld.uh\t%0, %1";
++ default:
++ abort();
++ }
++ }
++
++ [(set_attr "length" "2,4,2,4")
++ (set_attr "cc" "set_ncz,set_ncz,none,none")
++ (set_attr "type" "alu,alu,load_rm,load_rm")])
++
++(define_insn "zero_extendqisi2"
++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
++ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ return "castu.b\t%0";
++ case 1:
++ return "bfextu\t%0, %1, 0, 8";
++ case 2:
++ case 3:
++ return "ld.ub\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "2,4,2,4")
++ (set_attr "cc" "set_ncz, set_ncz, none, none")
++ (set_attr "type" "alu, alu, load_rm, load_rm")])
++
++(define_insn "zero_extendqihi2"
++ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
++ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ return "castu.b\t%0";
++ case 1:
++ return "bfextu\t%0, %1, 0, 8";
++ case 2:
++ case 3:
++ return "ld.ub\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "2,4,2,4")
++ (set_attr "cc" "set_ncz, set_ncz, none, none")
++ (set_attr "type" "alu, alu, load_rm, load_rm")])
++
++
++;;=============================================================================
++;; Conditional load and extend insns
++;;=============================================================================
++(define_insn "ldsi<mode>_predicable_se"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (sign_extend:SI
++ (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
++ "TARGET_V2_INSNS"
++ "ld<INTM:load_postfix_s>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "ldsi<mode>_predicable_ze"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (zero_extend:SI
++ (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
++ "TARGET_V2_INSNS"
++ "ld<INTM:load_postfix_u>%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "ldhi_predicable_ze"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (zero_extend:HI
++ (match_operand:QI 1 "memory_operand" "RKs10")))]
++ "TARGET_V2_INSNS"
++ "ld.ub%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++(define_insn "ldhi_predicable_se"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (sign_extend:HI
++ (match_operand:QI 1 "memory_operand" "RKs10")))]
++ "TARGET_V2_INSNS"
++ "ld.sb%?\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "cmp_cond_insn")
++ (set_attr "type" "load")
++ (set_attr "predicable" "yes")]
++)
++
++;;=============================================================================
++;; Conditional set register
++;; sr{cond4} rd
++;;-----------------------------------------------------------------------------
++
++;;Because of the same issue as with conditional moves and adds we must
++;;not separate the compare instrcution from the scc instruction as
++;;they might be sheduled "badly".
++
++(define_expand "s<code>"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_cond:SI (cc0)
++ (const_int 0)))]
++""
++{
++ if(TARGET_HARD_FLOAT && TARGET_ARCH_FPU)
++ FAIL;
++})
++
++(define_insn "*s<code>"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_cond:SI (cc0)
++ (const_int 0)))]
++ ""
++{
++ return "sr<cond>\t%0";
++}
++[(set_attr "length" "2")
++(set_attr "cc" "none")])
++
++(define_insn "seq"
++[(set (match_operand:SI 0 "register_operand" "=r")
++(eq:SI (cc0)
++ (const_int 0)))]
++ ""
++"sreq\t%0"
++[(set_attr "length" "2")
++(set_attr "cc" "none")])
++
++(define_insn "sne"
++[(set (match_operand:SI 0 "register_operand" "=r")
++(ne:SI (cc0)
++ (const_int 0)))]
++ ""
++"srne\t%0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "none")])
++
++(define_insn "smi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(cc0)
++ (const_int 0)] UNSPEC_COND_MI))]
++ ""
++ "srmi\t%0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "none")])
++
++(define_insn "spl"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec:SI [(cc0)
++ (const_int 0)] UNSPEC_COND_PL))]
++ ""
++ "srpl\t%0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "none")])
++
++
++;;=============================================================================
++;; Conditional branch
++;;-----------------------------------------------------------------------------
++;; Branch to label if the specified condition codes are set.
++;;=============================================================================
++; branch if negative
++(define_insn "bmi"
++ [(set (pc)
++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "brmi %0"
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++(define_insn "*bmi-reverse"
++ [(set (pc)
++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
++ (pc)
++ (label_ref (match_operand 0 "" ""))))]
++ ""
++ "brpl %0"
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++; branch if positive
++(define_insn "bpl"
++ [(set (pc)
++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "brpl %0"
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++(define_insn "*bpl-reverse"
++ [(set (pc)
++ (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
++ (pc)
++ (label_ref (match_operand 0 "" ""))))]
++ ""
++ "brmi %0"
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++; branch if equal
++(define_insn "b<code>"
++ [(set (pc)
++ (if_then_else (any_cond_b:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ {
++ if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
++ return get_attr_length(insn) == 6 ? "brvs .+6\;br<cond> %0" : "brvs .+8\;br<cond> %0";
++ else
++ return "br<cond> %0";
++ }
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (if_then_else (eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
++ (if_then_else
++ (and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 6)
++ (const_int 8))
++ (if_then_else
++ (and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)
++ (const_int 4))))
++ (set_attr "cc" "none")])
++
++(define_insn "beq"
++ [(set (pc)
++ (if_then_else (eq:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "breq %0";
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++(define_insn "bne"
++ [(set (pc)
++ (if_then_else (ne:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "brne %0";
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++(define_insn "b<code>"
++ [(set (pc)
++ (if_then_else (any_cond4:CC (cc0)
++ (const_int 0))
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ {
++ if(TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
++ return "brvs .+8\;br<cond> %l0";
++ else
++ return "br<cond> %l0";
++ }
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
++ (const_int 8)]
++ (const_int 4)))
++ (set_attr "cc" "none")])
++
++(define_insn "*b<code>-reverse"
++ [(set (pc)
++ (if_then_else (any_cond_b:CC (cc0)
++ (const_int 0))
++ (pc)
++ (label_ref (match_operand 0 "" ""))))]
++ ""
++ {
++ if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
++ return "brvs %0\;br<invcond> %0";
++ else
++ return "br<invcond> %0";
++ }
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (if_then_else (eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
++ (if_then_else
++ (and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 6)
++ (const_int 8))
++ (if_then_else
++ (and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)
++ (const_int 4))))
++ (set_attr "cc" "none")])
++
++(define_insn "*beq-reverse"
++ [(set (pc)
++ (if_then_else (eq:CC (cc0)
++ (const_int 0))
++ (pc)
++ (label_ref (match_operand 0 "" ""))))]
++ ""
++ "brne %0";
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++(define_insn "*bne-reverse"
++ [(set (pc)
++ (if_then_else (ne:CC (cc0)
++ (const_int 0))
++ (pc)
++ (label_ref (match_operand 0 "" ""))))]
++ ""
++ "breq %0";
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
++ (le (minus (pc) (match_dup 0)) (const_int 256)))
++ (const_int 2)] ; use compact branch
++ (const_int 4))) ; use extended branch
++ (set_attr "cc" "none")])
++
++(define_insn "*b<code>-reverse"
++ [(set (pc)
++ (if_then_else (any_cond4:CC (cc0)
++ (const_int 0))
++ (pc)
++ (label_ref (match_operand 0 "" ""))))]
++ ""
++ {
++ if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
++ return "brvs %l0\;br<invcond> %l0";
++ else
++ return "br<invcond> %0";
++ }
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
++ (const_int 8)]
++ (const_int 4)))
++ (set_attr "cc" "none")])
++
++;=============================================================================
++; Conditional Add/Subtract
++;-----------------------------------------------------------------------------
++; sub{cond4} Rd, imm
++;=============================================================================
++
++
++(define_expand "add<mode>cc"
++ [(set (match_operand:ADDCC 0 "register_operand" "")
++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
++ [(match_dup 4)
++ (match_dup 5)])
++ (match_operand:ADDCC 2 "register_operand" "")
++ (plus:ADDCC
++ (match_dup 2)
++ (match_operand:ADDCC 3 "" ""))))]
++ ""
++ {
++ if ( !(GET_CODE (operands[3]) == CONST_INT
++ || (TARGET_V2_INSNS && REG_P(operands[3]))) ){
++ FAIL;
++ }
++
++ /* Delete compare instruction as it is merged into this instruction */
++ remove_insn (get_last_insn_anywhere ());
++
++ operands[4] = avr32_compare_op0;
++ operands[5] = avr32_compare_op1;
++
++ if ( TARGET_V2_INSNS
++ && REG_P(operands[3])
++ && REGNO(operands[0]) != REGNO(operands[2]) ){
++ emit_move_insn (operands[0], operands[2]);
++ operands[2] = operands[0];
++ }
++ }
++ )
++
++(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg"
++ [(set (match_operand:ADDCC 0 "register_operand" "=r")
++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
++ [(match_operand:CMP 4 "register_operand" "r")
++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
++ (match_dup 0)
++ (plus:ADDCC
++ (match_operand:ADDCC 2 "register_operand" "r")
++ (match_operand:ADDCC 3 "register_operand" "r"))))]
++ "TARGET_V2_INSNS"
++ {
++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
++ return "add%i1\t%0, %2, %3";
++ }
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")])
++
++(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
++ [(set (match_operand:ADDCC 0 "register_operand" "=r")
++ (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
++ [(match_operand:CMP 4 "register_operand" "r")
++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
++ (match_operand:ADDCC 2 "register_operand" "0")
++ (plus:ADDCC
++ (match_dup 2)
++ (match_operand:ADDCC 3 "avr32_cond_immediate_operand" "Is08"))))]
++ ""
++ {
++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
++ return "sub%i1\t%0, -%3";
++ }
++ [(set_attr "length" "8")
++ (set_attr "cc" "cmp_cond_insn")])
++
++;=============================================================================
++; Conditional Move
++;-----------------------------------------------------------------------------
++; mov{cond4} Rd, (Rs/imm)
++;=============================================================================
++(define_expand "mov<mode>cc"
++ [(set (match_operand:MOVCC 0 "register_operand" "")
++ (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
++ [(match_dup 4)
++ (match_dup 5)])
++ (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "")
++ (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "")))]
++ ""
++ {
++ /* Delete compare instruction as it is merged into this instruction */
++ remove_insn (get_last_insn_anywhere ());
++
++ operands[4] = avr32_compare_op0;
++ operands[5] = avr32_compare_op1;
++ }
++ )
++
++
++(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
++ [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
++ (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
++ [(match_operand:CMP 4 "register_operand" "r,r,r")
++ (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>,<CMP:cmp_constraint>,<CMP:cmp_constraint>")])
++ (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "0, rKs08,rKs08")
++ (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "rKs08,0,rKs08")))]
++ ""
++ {
++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
++
++ switch( which_alternative ){
++ case 0:
++ return "mov%i1 %0, %3";
++ case 1:
++ return "mov%1 %0, %2";
++ case 2:
++ return "mov%1 %0, %2\;mov%i1 %0, %3";
++ default:
++ abort();
++ }
++
++ }
++ [(set_attr "length" "8,8,12")
++ (set_attr "cc" "cmp_cond_insn")])
++
++
++
++
++;;=============================================================================
++;; jump
++;;-----------------------------------------------------------------------------
++;; Jump inside a function; an unconditional branch to a label.
++;;=============================================================================
++(define_insn "jump"
++ [(set (pc)
++ (label_ref (match_operand 0 "" "")))]
++ ""
++ {
++ if (get_attr_length(insn) > 4)
++ return "Can't jump this far";
++ return (get_attr_length(insn) == 2 ?
++ "rjmp %0" : "bral %0");
++ }
++ [(set_attr "type" "branch")
++ (set (attr "length")
++ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
++ (le (minus (pc) (match_dup 0)) (const_int 1024)))
++ (const_int 2) ; use rjmp
++ (le (match_dup 0) (const_int 1048575))
++ (const_int 4)] ; use bral
++ (const_int 8))) ; do something else
++ (set_attr "cc" "none")])
++
++;;=============================================================================
++;; call
++;;-----------------------------------------------------------------------------
++;; Subroutine call instruction returning no value.
++;;=============================================================================
++(define_insn "call_internal"
++ [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
++ (match_operand 1 "" ""))
++ (clobber (reg:SI LR_REGNUM))])]
++ ""
++ {
++
++ /* Check for a flashvault call. */
++ if (avr32_flashvault_call (SYMBOL_REF_DECL (operands[0])))
++ {
++ /* Assembly is already emitted. */
++ return "";
++ }
++
++ switch (which_alternative) {
++ case 0:
++ return "icall\t%0";
++ case 1:
++ return "rcall\t%0";
++ case 2:
++ return "mcall\t%0";
++ case 3:
++ if (TARGET_HAS_ASM_ADDR_PSEUDOS)
++ return "call\t%0";
++ else
++ return "mcall\tr6[%0@got]";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "type" "call")
++ (set_attr "length" "2,4,4,10")
++ (set_attr "cc" "clobber")])
++
++
++(define_expand "call"
++ [(parallel [(call (match_operand:SI 0 "" "")
++ (match_operand 1 "" ""))
++ (clobber (reg:SI LR_REGNUM))])]
++ ""
++ {
++ rtx call_address;
++ if ( GET_CODE(operands[0]) != MEM )
++ FAIL;
++
++ call_address = XEXP(operands[0], 0);
++
++ /* If assembler supports call pseudo insn and the call address is a symbol then nothing special needs to be done. */
++ if (TARGET_HAS_ASM_ADDR_PSEUDOS && (GET_CODE(call_address) == SYMBOL_REF) )
++ {
++ /* We must however mark the function as using the GOT if flag_pic is set, since the call insn might turn into a mcall using the GOT ptr register. */
++ if (flag_pic)
++ {
++ crtl->uses_pic_offset_table = 1;
++ emit_call_insn(gen_call_internal(call_address, operands[1]));
++ DONE;
++ }
++ }
++ else
++ {
++ if (flag_pic && GET_CODE(call_address) == SYMBOL_REF )
++ {
++ crtl->uses_pic_offset_table = 1;
++ emit_call_insn(gen_call_internal(call_address, operands[1]));
++ DONE;
++ }
++
++ if (!SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) )
++ {
++ if (optimize_size && GET_CODE(call_address) == SYMBOL_REF )
++ {
++ call_address = force_const_mem(SImode, call_address);
++ }
++ else
++ {
++ call_address = force_reg(SImode, call_address);
++ }
++ }
++ }
++ emit_call_insn(gen_call_internal(call_address, operands[1]));
++ DONE;
++
++ }
++)
++
++;;=============================================================================
++;; call_value
++;;-----------------------------------------------------------------------------
++;; Subroutine call instruction returning a value.
++;;=============================================================================
++(define_expand "call_value"
++ [(parallel [(set (match_operand:SI 0 "" "")
++ (call (match_operand:SI 1 "" "")
++ (match_operand 2 "" "")))
++ (clobber (reg:SI LR_REGNUM))])]
++ ""
++ {
++ rtx call_address;
++ if ( GET_CODE(operands[1]) != MEM )
++ FAIL;
++
++ call_address = XEXP(operands[1], 0);
++
++ /* Check for a flashvault call.
++ if (GET_CODE (call_address) == SYMBOL_REF
++ && avr32_flashvault_call (SYMBOL_REF_DECL (call_address)))
++ DONE;
++
++ */
++
++ /* If assembler supports call pseudo insn and the call
++ address is a symbol then nothing special needs to be done. */
++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS
++ && (GET_CODE(call_address) == SYMBOL_REF) ){
++ /* We must however mark the function as using the GOT if
++ flag_pic is set, since the call insn might turn into
++ a mcall using the GOT ptr register. */
++ if ( flag_pic ) {
++ crtl->uses_pic_offset_table = 1;
++ emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
++ DONE;
++ }
++ } else {
++ if ( flag_pic &&
++ GET_CODE(call_address) == SYMBOL_REF ){
++ crtl->uses_pic_offset_table = 1;
++ emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
++ DONE;
++ }
++
++ if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
++ if ( optimize_size &&
++ GET_CODE(call_address) == SYMBOL_REF){
++ call_address = force_const_mem(SImode, call_address);
++ } else {
++ call_address = force_reg(SImode, call_address);
++ }
++ }
++ }
++ emit_call_insn(gen_call_value_internal(operands[0], call_address,
++ operands[2]));
++ DONE;
++
++ })
++
++(define_insn "call_value_internal"
++ [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
++ (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
++ (match_operand 2 "" "")))
++ (clobber (reg:SI LR_REGNUM))])]
++ ;; Operand 2 not used on the AVR32.
++ ""
++ {
++ /* Check for a flashvault call. */
++ if (avr32_flashvault_call (SYMBOL_REF_DECL (operands[1])))
++ {
++ /* Assembly is already emitted. */
++ return "";
++ }
++
++
++ switch (which_alternative) {
++ case 0:
++ return "icall\t%1";
++ case 1:
++ return "rcall\t%1";
++ case 2:
++ return "mcall\t%1";
++ case 3:
++ if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
++ return "call\t%1";
++ else
++ return "mcall\tr6[%1@got]";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "type" "call")
++ (set_attr "length" "2,4,4,10")
++ (set_attr "cc" "call_set")])
++
++
++;;=============================================================================
++;; untyped_call
++;;-----------------------------------------------------------------------------
++;; Subrutine call instruction returning a value of any type.
++;; The code is copied from m68k.md (except gen_blockage is removed)
++;; Fixme!
++;;=============================================================================
++(define_expand "untyped_call"
++ [(parallel [(call (match_operand 0 "avr32_call_operand" "")
++ (const_int 0))
++ (match_operand 1 "" "")
++ (match_operand 2 "" "")])]
++ ""
++ {
++ int i;
++
++ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
++
++ for (i = 0; i < XVECLEN (operands[2], 0); i++) {
++ rtx set = XVECEXP (operands[2], 0, i);
++ emit_move_insn (SET_DEST (set), SET_SRC (set));
++ }
++
++ /* The optimizer does not know that the call sets the function value
++ registers we stored in the result block. We avoid problems by
++ claiming that all hard registers are used and clobbered at this
++ point. */
++ emit_insn (gen_blockage ());
++
++ DONE;
++ })
++
++
++;;=============================================================================
++;; return
++;;=============================================================================
++
++(define_insn "return"
++ [(return)]
++ "USE_RETURN_INSN (FALSE)"
++ {
++ avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
++ return "";
++ }
++ [(set_attr "length" "4")
++ (set_attr "type" "call")]
++ )
++
++
++(define_insn "return_cond"
++ [(set (pc)
++ (if_then_else (match_operand 0 "avr32_comparison_operand" "")
++ (return)
++ (pc)))]
++ "USE_RETURN_INSN (TRUE)"
++ "ret%0\tr12";
++ [(set_attr "type" "call")])
++
++(define_insn "return_cond_predicable"
++ [(return)]
++ "USE_RETURN_INSN (TRUE)"
++ "ret%?\tr12";
++ [(set_attr "type" "call")
++ (set_attr "predicable" "yes")])
++
++
++(define_insn "return_imm"
++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
++ (use (reg RETVAL_REGNUM))
++ (return)])]
++ "USE_RETURN_INSN (FALSE) &&
++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
++ {
++ avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
++ return "";
++ }
++ [(set_attr "length" "4")
++ (set_attr "type" "call")]
++ )
++
++(define_insn "return_imm_cond"
++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
++ (use (reg RETVAL_REGNUM))
++ (set (pc)
++ (if_then_else (match_operand 1 "avr32_comparison_operand" "")
++ (return)
++ (pc)))])]
++ "USE_RETURN_INSN (TRUE) &&
++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
++ "ret%1\t%0";
++ [(set_attr "type" "call")]
++ )
++
++(define_insn "return_imm_predicable"
++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
++ (use (reg RETVAL_REGNUM))
++ (return)])]
++ "USE_RETURN_INSN (TRUE) &&
++ ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
++ "ret%?\t%0";
++ [(set_attr "type" "call")
++ (set_attr "predicable" "yes")])
++
++(define_insn "return_<mode>reg"
++ [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
++ (use (reg RETVAL_REGNUM))
++ (return)]
++ "USE_RETURN_INSN (TRUE)"
++ "ret%?\t%0";
++ [(set_attr "type" "call")
++ (set_attr "predicable" "yes")])
++
++(define_insn "return_<mode>reg_cond"
++ [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
++ (use (reg RETVAL_REGNUM))
++ (set (pc)
++ (if_then_else (match_operator 1 "avr32_comparison_operator"
++ [(cc0) (const_int 0)])
++ (return)
++ (pc)))]
++ "USE_RETURN_INSN (TRUE)"
++ "ret%1\t%0";
++ [(set_attr "type" "call")])
++
++;;=============================================================================
++;; nonlocal_goto_receiver
++;;-----------------------------------------------------------------------------
++;; For targets with a return stack we must make sure to flush the return stack
++;; since it will be corrupt after a nonlocal goto.
++;;=============================================================================
++(define_expand "nonlocal_goto_receiver"
++ [(const_int 0)]
++ "TARGET_RETURN_STACK"
++ "
++ {
++ emit_insn ( gen_frs() );
++ DONE;
++ }
++ "
++ )
++
++
++;;=============================================================================
++;; builtin_setjmp_receiver
++;;-----------------------------------------------------------------------------
++;; For pic code we need to reload the pic register.
++;; For targets with a return stack we must make sure to flush the return stack
++;; since it will probably be corrupted.
++;;=============================================================================
++(define_expand "builtin_setjmp_receiver"
++ [(label_ref (match_operand 0 "" ""))]
++ "flag_pic"
++ "
++ {
++ if ( TARGET_RETURN_STACK )
++ emit_insn ( gen_frs() );
++
++ avr32_load_pic_register ();
++ DONE;
++ }
++ "
++)
++
++
++;;=============================================================================
++;; indirect_jump
++;;-----------------------------------------------------------------------------
++;; Jump to an address in reg or memory.
++;;=============================================================================
++(define_expand "indirect_jump"
++ [(set (pc)
++ (match_operand:SI 0 "general_operand" ""))]
++ ""
++ {
++ /* One of the ops has to be in a register. */
++ if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
++ && !avr32_legitimate_pic_operand_p(operands[0]) )
++ operands[0] = legitimize_pic_address (operands[0], SImode, 0);
++ else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
++ /* If we have an address operand then this function uses the pic register. */
++ crtl->uses_pic_offset_table = 1;
++ })
++
++
++(define_insn "indirect_jump_internal"
++ [(set (pc)
++ (match_operand:SI 0 "avr32_non_rmw_general_operand" "r,m,W"))]
++ ""
++ {
++ switch( which_alternative ){
++ case 0:
++ return "mov\tpc, %0";
++ case 1:
++ if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
++ return "lddpc\tpc, %0";
++ else
++ return "ld.w\tpc, %0";
++ case 2:
++ if ( flag_pic )
++ return "ld.w\tpc, r6[%0@got]";
++ else
++ return "lda.w\tpc, %0";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "2,4,8")
++ (set_attr "type" "call,call,call")
++ (set_attr "cc" "none,none,clobber")])
++
++
++
++;;=============================================================================
++;; casesi and tablejump
++;;=============================================================================
++(define_insn "tablejump_add"
++ [(set (pc)
++ (plus:SI (match_operand:SI 0 "register_operand" "r")
++ (mult:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "Ku04" ))))
++ (use (label_ref (match_operand 3 "" "")))]
++ "flag_pic &&
++ ((INTVAL(operands[2]) == 0) || (INTVAL(operands[2]) == 2) ||
++ (INTVAL(operands[2]) == 4) || (INTVAL(operands[2]) == 8))"
++ "add\tpc, %0, %1 << %p2"
++ [(set_attr "length" "4")
++ (set_attr "cc" "clobber")])
++
++(define_insn "tablejump_insn"
++ [(set (pc) (match_operand:SI 0 "memory_operand" "m"))
++ (use (label_ref (match_operand 1 "" "")))]
++ "!flag_pic"
++ "ld.w\tpc, %0"
++ [(set_attr "length" "4")
++ (set_attr "type" "call")
++ (set_attr "cc" "none")])
++
++(define_expand "casesi"
++ [(match_operand:SI 0 "register_operand" "") ; index to jump on
++ (match_operand:SI 1 "const_int_operand" "") ; lower bound
++ (match_operand:SI 2 "const_int_operand" "") ; total range
++ (match_operand:SI 3 "" "") ; table label
++ (match_operand:SI 4 "" "")] ; Out of range label
++ ""
++ "
++ {
++ rtx reg;
++ rtx index = operands[0];
++ rtx low_bound = operands[1];
++ rtx range = operands[2];
++ rtx table_label = operands[3];
++ rtx oor_label = operands[4];
++
++ index = force_reg ( SImode, index );
++ if (low_bound != const0_rtx)
++ {
++ if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){
++ reg = force_reg(SImode, GEN_INT (INTVAL (low_bound)));
++ emit_insn (gen_subsi3 (reg, index,
++ reg));
++ } else {
++ reg = gen_reg_rtx (SImode);
++ emit_insn (gen_addsi3 (reg, index,
++ GEN_INT (-INTVAL (low_bound))));
++ }
++ index = reg;
++ }
++
++ if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\"))
++ range = force_reg (SImode, range);
++
++ emit_cmp_and_jump_insns ( index, range, GTU, NULL_RTX, SImode, 1, oor_label );
++ reg = gen_reg_rtx (SImode);
++ emit_move_insn ( reg, gen_rtx_LABEL_REF (VOIDmode, table_label));
++
++ if ( flag_pic )
++ emit_jump_insn ( gen_tablejump_add ( reg, index, GEN_INT(4), table_label));
++ else
++ emit_jump_insn (
++ gen_tablejump_insn ( gen_rtx_MEM ( SImode,
++ gen_rtx_PLUS ( SImode,
++ reg,
++ gen_rtx_MULT ( SImode,
++ index,
++ GEN_INT(4)))),
++ table_label));
++ DONE;
++ }"
++)
++
++
++
++(define_insn "prefetch"
++ [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p")
++ (match_operand 1 "const_int_operand" "")
++ (match_operand 2 "const_int_operand" ""))]
++ ""
++ {
++ return "pref\t%0";
++ }
++
++ [(set_attr "length" "4")
++ (set_attr "type" "load")
++ (set_attr "cc" "none")])
++
++
++
++;;=============================================================================
++;; prologue
++;;-----------------------------------------------------------------------------
++;; This pattern, if defined, emits RTL for entry to a function. The function
++;; entry i responsible for setting up the stack frame, initializing the frame
++;; pointer register, saving callee saved registers, etc.
++;;=============================================================================
++(define_expand "prologue"
++ [(clobber (const_int 0))]
++ ""
++ "
++ avr32_expand_prologue();
++ DONE;
++ "
++ )
++
++;;=============================================================================
++;; eh_return
++;;-----------------------------------------------------------------------------
++;; This pattern, if defined, affects the way __builtin_eh_return, and
++;; thence the call frame exception handling library routines, are
++;; built. It is intended to handle non-trivial actions needed along
++;; the abnormal return path.
++;;
++;; The address of the exception handler to which the function should
++;; return is passed as operand to this pattern. It will normally need
++;; to copied by the pattern to some special register or memory
++;; location. If the pattern needs to determine the location of the
++;; target call frame in order to do so, it may use
++;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
++;; assigned.
++;;
++;; If this pattern is not defined, the default action will be to
++;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
++;; that macro or this pattern needs to be defined if call frame
++;; exception handling is to be used.
++
++;; We can't expand this before we know where the link register is stored.
++(define_insn_and_split "eh_return"
++ [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
++ VUNSPEC_EH_RETURN)
++ (clobber (match_scratch:SI 1 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(const_int 0)]
++ "
++ {
++ avr32_set_return_address (operands[0], operands[1]);
++ DONE;
++ }"
++ )
++
++
++;;=============================================================================
++;; ffssi2
++;;-----------------------------------------------------------------------------
++(define_insn "ffssi2"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
++ ""
++ "mov %0, %1
++ brev %0
++ clz %0, %0
++ sub %0, -1
++ cp %0, 33
++ moveq %0, 0"
++ [(set_attr "length" "18")
++ (set_attr "cc" "clobber")]
++ )
++
++
++
++;;=============================================================================
++;; swap_h
++;;-----------------------------------------------------------------------------
++(define_insn "*swap_h"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (ior:SI (ashift:SI (match_dup 0) (const_int 16))
++ (lshiftrt:SI (match_dup 0) (const_int 16))))]
++ ""
++ "swap.h %0"
++ [(set_attr "length" "2")]
++ )
++
++(define_insn_and_split "bswap_16"
++ [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
++ (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
++ (const_int 8))
++ (const_int 255))
++ (ashift:HI (and:HI (match_dup 1)
++ (const_int 255))
++ (const_int 8))))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ if ( REGNO(operands[0]) == REGNO(operands[1]))
++ return "swap.bh\t%0";
++ else
++ return "mov\t%0, %1\;swap.bh\t%0";
++ case 1:
++ return "stswp.h\t%0, %1";
++ case 2:
++ return "ldswp.sh\t%0, %1";
++ default:
++ abort();
++ }
++ }
++
++ "(reload_completed &&
++ REG_P(operands[0]) && REG_P(operands[1])
++ && (REGNO(operands[0]) != REGNO(operands[1])))"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 0)
++ (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
++ (const_int 8))
++ (const_int 255))
++ (ashift:HI (and:HI (match_dup 0)
++ (const_int 255))
++ (const_int 8))))]
++ ""
++
++ [(set_attr "length" "4,4,4")
++ (set_attr "type" "alu,store,load_rm")]
++ )
++
++(define_insn_and_split "bswap_32"
++ [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
++ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
++ (const_int -16777216))
++ (const_int 24))
++ (lshiftrt:SI (and:SI (match_dup 1)
++ (const_int 16711680))
++ (const_int 8)))
++ (ior:SI (ashift:SI (and:SI (match_dup 1)
++ (const_int 65280))
++ (const_int 8))
++ (ashift:SI (and:SI (match_dup 1)
++ (const_int 255))
++ (const_int 24)))))]
++ ""
++ {
++ switch ( which_alternative ){
++ case 0:
++ if ( REGNO(operands[0]) == REGNO(operands[1]))
++ return "swap.b\t%0";
++ else
++ return "#";
++ case 1:
++ return "stswp.w\t%0, %1";
++ case 2:
++ return "ldswp.w\t%0, %1";
++ default:
++ abort();
++ }
++ }
++ "(reload_completed &&
++ REG_P(operands[0]) && REG_P(operands[1])
++ && (REGNO(operands[0]) != REGNO(operands[1])))"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 0)
++ (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
++ (const_int -16777216))
++ (const_int 24))
++ (lshiftrt:SI (and:SI (match_dup 0)
++ (const_int 16711680))
++ (const_int 8)))
++ (ior:SI (ashift:SI (and:SI (match_dup 0)
++ (const_int 65280))
++ (const_int 8))
++ (ashift:SI (and:SI (match_dup 0)
++ (const_int 255))
++ (const_int 24)))))]
++ ""
++
++ [(set_attr "length" "4,4,4")
++ (set_attr "type" "alu,store,load_rm")]
++ )
++
++
++;;=============================================================================
++;; blockage
++;;-----------------------------------------------------------------------------
++;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
++;; all of memory. This blocks insns from being moved across this point.
++
++(define_insn "blockage"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
++ ""
++ ""
++ [(set_attr "length" "0")]
++)
++
++;;=============================================================================
++;; clzsi2
++;;-----------------------------------------------------------------------------
++(define_insn "clzsi2"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
++ ""
++ "clz %0, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "set_z")]
++ )
++
++;;=============================================================================
++;; ctzsi2
++;;-----------------------------------------------------------------------------
++(define_insn "ctzsi2"
++ [ (set (match_operand:SI 0 "register_operand" "=r,r")
++ (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
++ ""
++ "@
++ brev\t%0\;clz\t%0, %0
++ mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
++ [(set_attr "length" "8")
++ (set_attr "cc" "set_z")]
++ )
++
++;;=============================================================================
++;; cache instructions
++;;-----------------------------------------------------------------------------
++(define_insn "cache"
++ [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p")
++ (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
++ ""
++ "cache %0, %1"
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "sync"
++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
++ ""
++ "sync %0"
++ [(set_attr "length" "4")]
++ )
++
++;;=============================================================================
++;; TLB instructions
++;;-----------------------------------------------------------------------------
++(define_insn "tlbr"
++ [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
++ ""
++ "tlbr"
++ [(set_attr "length" "2")]
++ )
++
++(define_insn "tlbw"
++ [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
++ ""
++ "tlbw"
++ [(set_attr "length" "2")]
++ )
++
++(define_insn "tlbs"
++ [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
++ ""
++ "tlbs"
++ [(set_attr "length" "2")]
++ )
++
++;;=============================================================================
++;; Breakpoint instruction
++;;-----------------------------------------------------------------------------
++(define_insn "breakpoint"
++ [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
++ ""
++ "breakpoint"
++ [(set_attr "length" "2")]
++ )
++
++
++;;=============================================================================
++;; mtsr/mfsr instruction
++;;-----------------------------------------------------------------------------
++(define_insn "mtsr"
++ [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
++ (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
++ ""
++ "mtsr\t%0, %1"
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "mfsr"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
++ ""
++ "mfsr\t%0, %1"
++ [(set_attr "length" "4")]
++ )
++
++;;=============================================================================
++;; mtdr/mfdr instruction
++;;-----------------------------------------------------------------------------
++(define_insn "mtdr"
++ [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
++ (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
++ ""
++ "mtdr\t%0, %1"
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "mfdr"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
++ ""
++ "mfdr\t%0, %1"
++ [(set_attr "length" "4")]
++ )
++
++;;=============================================================================
++;; musfr
++;;-----------------------------------------------------------------------------
++(define_insn "musfr"
++ [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
++ ""
++ "musfr\t%0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "clobber")]
++ )
++
++(define_insn "mustr"
++ [ (set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
++ ""
++ "mustr\t%0"
++ [(set_attr "length" "2")]
++ )
++
++(define_insn "ssrf"
++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)]
++ ""
++ "ssrf %0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "clobber")]
++ )
++
++(define_insn "csrf"
++ [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)]
++ ""
++ "csrf %0"
++ [(set_attr "length" "2")
++ (set_attr "cc" "clobber")]
++ )
++
++;;=============================================================================
++;; Flush Return Stack instruction
++;;-----------------------------------------------------------------------------
++(define_insn "frs"
++ [ (unspec_volatile [(const_int 0)] VUNSPEC_FRS)]
++ ""
++ "frs"
++ [(set_attr "length" "2")
++ (set_attr "cc" "none")]
++ )
++
++
++;;=============================================================================
++;; Saturation Round Scale instruction
++;;-----------------------------------------------------------------------------
++(define_insn "sats"
++ [ (set (match_operand:SI 0 "register_operand" "+r")
++ (unspec:SI [(match_dup 0)
++ (match_operand 1 "immediate_operand" "Ku05")
++ (match_operand 2 "immediate_operand" "Ku05")]
++ UNSPEC_SATS)) ]
++ "TARGET_DSP"
++ "sats\t%0 >> %1, %2"
++ [(set_attr "type" "alu_sat")
++ (set_attr "length" "4")]
++ )
++
++(define_insn "satu"
++ [ (set (match_operand:SI 0 "register_operand" "+r")
++ (unspec:SI [(match_dup 0)
++ (match_operand 1 "immediate_operand" "Ku05")
++ (match_operand 2 "immediate_operand" "Ku05")]
++ UNSPEC_SATU)) ]
++ "TARGET_DSP"
++ "satu\t%0 >> %1, %2"
++ [(set_attr "type" "alu_sat")
++ (set_attr "length" "4")]
++ )
++
++(define_insn "satrnds"
++ [ (set (match_operand:SI 0 "register_operand" "+r")
++ (unspec:SI [(match_dup 0)
++ (match_operand 1 "immediate_operand" "Ku05")
++ (match_operand 2 "immediate_operand" "Ku05")]
++ UNSPEC_SATRNDS)) ]
++ "TARGET_DSP"
++ "satrnds\t%0 >> %1, %2"
++ [(set_attr "type" "alu_sat")
++ (set_attr "length" "4")]
++ )
++
++(define_insn "satrndu"
++ [ (set (match_operand:SI 0 "register_operand" "+r")
++ (unspec:SI [(match_dup 0)
++ (match_operand 1 "immediate_operand" "Ku05")
++ (match_operand 2 "immediate_operand" "Ku05")]
++ UNSPEC_SATRNDU)) ]
++ "TARGET_DSP"
++   "satrndu\t%0 >> %1, %2"
++ [(set_attr "type" "alu_sat")
++ (set_attr "length" "4")]
++ )
++
++(define_insn "sleep"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_SLEEP)
++ (match_operand:SI 0 "const_int_operand" "")]
++ ""
++ "sleep %0"
++ [(set_attr "length" "1")
++ (set_attr "cc" "none")
++ ])
++
++(define_expand "delay_cycles"
++ [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "i")]
++ VUNSPEC_DELAY_CYCLES)]
++ ""
++ "
++ unsigned int cycles = UINTVAL (operands[0]);
++ if (IN_RANGE(cycles,0x10000 ,0xFFFFFFFF))
++ {
++ unsigned int msb = (cycles & 0xFFFF0000);
++ unsigned int shift = 16;
++ msb = (msb >> shift);
++ unsigned int cycles_used = (msb*0x10000);
++ emit_insn (gen_delay_cycles_2 (gen_int_mode (msb, SImode)));
++ cycles -= cycles_used;
++ }
++ if (IN_RANGE(cycles, 4, 0xFFFF))
++ {
++ unsigned int loop_count = (cycles/ 4);
++ unsigned int cycles_used = (loop_count*4);
++ emit_insn (gen_delay_cycles_1 (gen_int_mode (loop_count, SImode)));
++ cycles -= cycles_used;
++ }
++ while (cycles >= 3)
++ {
++ emit_insn (gen_nop3 ());
++ cycles -= 3;
++ }
++ if (cycles == 1 || cycles == 2)
++ {
++ while (cycles--)
++ emit_insn (gen_nop ());
++ }
++ DONE;
++ ")
++
++(define_insn "delay_cycles_1"
++[(unspec_volatile [(const_int 0)] VUNSPEC_DELAY_CYCLES_1)
++ (match_operand:SI 0 "immediate_operand" "")
++ (clobber (match_scratch:SI 1 "=&r"))]
++ ""
++ "mov\t%1, %0
++ 1: sub\t%1, 1
++ brne\t1b
++ nop"
++)
++
++(define_insn "delay_cycles_2"
++[(unspec_volatile [(const_int 0)] VUNSPEC_DELAY_CYCLES_2)
++ (match_operand:SI 0 "immediate_operand" "")
++ (clobber (match_scratch:SI 1 "=&r"))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ ""
++ "mov\t%1, %0
++ 1: mov\t%2, 16383
++ 2: sub\t%2, 1
++ brne\t2b
++ nop
++ sub\t%1, 1
++ brne\t1b
++ nop"
++)
++
++;; CPU instructions
++
++;;=============================================================================
++;; nop
++;;-----------------------------------------------------------------------------
++;; No-op instruction.
++;;=============================================================================
++(define_insn "nop"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_NOP)]
++ ""
++ "nop"
++ [(set_attr "length" "1")
++ (set_attr "type" "alu")
++ (set_attr "cc" "none")])
++
++;; NOP3
++(define_insn "nop3"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_NOP3)]
++ ""
++ "rjmp\t2"
++ [(set_attr "length" "3")
++ (set_attr "type" "alu")
++ (set_attr "cc" "none")])
++
++;; Special patterns for dealing with the constant pool
++
++(define_insn "align_4"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
++ ""
++ {
++ assemble_align (32);
++ return "";
++ }
++ [(set_attr "length" "2")]
++)
++
++
++(define_insn "consttable_start"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
++ ""
++ {
++ return ".cpool";
++ }
++ [(set_attr "length" "0")]
++ )
++
++(define_insn "consttable_end"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
++ ""
++ {
++ making_const_table = FALSE;
++ return "";
++ }
++ [(set_attr "length" "0")]
++)
++
++
++(define_insn "consttable_4"
++ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
++ ""
++ {
++ making_const_table = TRUE;
++ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
++ {
++ case MODE_FLOAT:
++ {
++ REAL_VALUE_TYPE r;
++ char real_string[1024];
++ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
++ real_to_decimal(real_string, &r, 1024, 0, 1);
++ asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
++ break;
++ }
++ default:
++ assemble_integer (operands[0], 4, 0, 1);
++ break;
++ }
++ return "";
++ }
++ [(set_attr "length" "4")]
++)
++
++(define_insn "consttable_8"
++ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
++ ""
++ {
++ making_const_table = TRUE;
++ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
++ {
++ case MODE_FLOAT:
++ {
++ REAL_VALUE_TYPE r;
++ char real_string[1024];
++ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
++ real_to_decimal(real_string, &r, 1024, 0, 1);
++ asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
++ break;
++ }
++ default:
++ assemble_integer(operands[0], 8, 0, 1);
++ break;
++ }
++ return "";
++ }
++ [(set_attr "length" "8")]
++)
++
++(define_insn "consttable_16"
++ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)]
++ ""
++ {
++ making_const_table = TRUE;
++ assemble_integer(operands[0], 16, 0, 1);
++ return "";
++ }
++ [(set_attr "length" "16")]
++)
++
++;;=============================================================================
++;; coprocessor instructions
++;;-----------------------------------------------------------------------------
++(define_insn "cop"
++ [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
++ (match_operand 1 "immediate_operand" "Ku04")
++ (match_operand 2 "immediate_operand" "Ku04")
++ (match_operand 3 "immediate_operand" "Ku04")
++ (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
++ ""
++ "cop\tcp%0, cr%1, cr%2, cr%3, %4"
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "mvcrsi"
++ [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
++ (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
++ (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
++ VUNSPEC_MVCR)) ]
++ ""
++ "@
++ mvcr.w\tcp%1, %0, cr%2
++ stcm.w\tcp%1, %0, cr%2
++ stc.w\tcp%1, %0, cr%2"
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "mvcrdi"
++ [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
++ (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
++ (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
++ VUNSPEC_MVCR)) ]
++ ""
++ "@
++ mvcr.d\tcp%1, %0, cr%2
++ stcm.d\tcp%1, %0, cr%2-cr%i2
++ stc.d\tcp%1, %0, cr%2"
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "mvrcsi"
++ [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
++ (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
++ (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
++ VUNSPEC_MVRC)]
++ ""
++ {
++ switch (which_alternative){
++ case 0:
++ return "mvrc.w\tcp%0, cr%1, %2";
++ case 1:
++ return "ldcm.w\tcp%0, %2, cr%1";
++ case 2:
++ return "ldc.w\tcp%0, cr%1, %2";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "4")]
++ )
++
++(define_insn "mvrcdi"
++ [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
++ (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
++ (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
++ VUNSPEC_MVRC)]
++ ""
++ {
++ switch (which_alternative){
++ case 0:
++ return "mvrc.d\tcp%0, cr%1, %2";
++ case 1:
++ return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
++ case 2:
++ return "ldc.d\tcp%0, cr%1, %2";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "length" "4")]
++ )
++
++;;=============================================================================
++;; epilogue
++;;-----------------------------------------------------------------------------
++;; This pattern emits RTL for exit from a function. The function exit is
++;; responsible for deallocating the stack frame, restoring callee saved
++;; registers and emitting the return instruction.
++;; ToDo: using TARGET_ASM_FUNCTION_PROLOGUE instead.
++;;=============================================================================
++(define_expand "epilogue"
++ [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
++ ""
++ "
++ if (USE_RETURN_INSN (FALSE)){
++ emit_jump_insn (gen_return ());
++ DONE;
++ }
++ emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
++ gen_rtvec (1,
++ gen_rtx_RETURN (VOIDmode)),
++ VUNSPEC_EPILOGUE));
++ DONE;
++ "
++ )
++
++(define_insn "*epilogue_insns"
++ [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
++ ""
++ {
++ avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
++ return "";
++ }
++ ; Length is absolute worst case
++ [(set_attr "type" "branch")
++ (set_attr "length" "12")]
++ )
++
++(define_insn "*epilogue_insns_ret_imm"
++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
++ (use (reg RETVAL_REGNUM))
++ (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
++ "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
++ {
++ avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
++ return "";
++ }
++ ; Length is absolute worst case
++ [(set_attr "type" "branch")
++ (set_attr "length" "12")]
++ )
++
++(define_insn "sibcall_epilogue"
++ [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
++ ""
++ {
++ avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
++ return "";
++ }
++;; Length is absolute worst case
++ [(set_attr "type" "branch")
++ (set_attr "length" "12")]
++ )
++
++(define_insn "*sibcall_epilogue_insns_ret_imm"
++ [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
++ (use (reg RETVAL_REGNUM))
++ (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
++ "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
++ {
++ avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
++ return "";
++ }
++ ; Length is absolute worst case
++ [(set_attr "type" "branch")
++ (set_attr "length" "12")]
++ )
++
++(define_insn "ldxi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mem:SI (plus:SI
++ (match_operand:SI 1 "register_operand" "r")
++ (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
++ (const_int 8)
++ (match_operand:SI 3 "immediate_operand" "Ku05"))
++ (const_int 4)))))]
++ "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
++ || INTVAL(operands[3]) == 0)"
++ {
++ switch ( INTVAL(operands[3]) ){
++ case 0:
++ return "ld.w %0, %1[%2:b << 2]";
++ case 8:
++ return "ld.w %0, %1[%2:l << 2]";
++ case 16:
++ return "ld.w %0, %1[%2:u << 2]";
++ case 24:
++ return "ld.w %0, %1[%2:t << 2]";
++ default:
++ internal_error("illegal operand for ldxi");
++ }
++ }
++ [(set_attr "type" "load")
++ (set_attr "length" "4")
++ (set_attr "cc" "none")])
++
++
++
++
++
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; sub r8, r7, 8
++;; st.w r8[0x0], r12
++;; to
++;; sub r8, r7, 8
++;; st.w r7[-0x8], r12
++;;=============================================================================
++; (set (reg:SI 9 r8)
++; (plus:SI (reg/f:SI 6 r7)
++; (const_int ...)))
++; (set (mem:SI (reg:SI 9 r8))
++; (reg:SI 12 r12))
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (plus:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")))
++ (set (mem:SI (match_dup 0))
++ (match_operand:SI 3 "register_operand" ""))]
++ "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
++ [(set (match_dup 0)
++ (plus:SI (match_dup 1)
++ (match_dup 2)))
++ (set (mem:SI (plus:SI (match_dup 1)
++ (match_dup 2)))
++ (match_dup 3))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; sub r6, r7, 4
++;; ld.w r6, r6[0x0]
++;; to
++;; sub r6, r7, 4
++;; ld.w r6, r7[-0x4]
++;;=============================================================================
++; (set (reg:SI 7 r6)
++; (plus:SI (reg/f:SI 6 r7)
++; (const_int -4 [0xfffffffc])))
++; (set (reg:SI 7 r6)
++; (mem:SI (reg:SI 7 r6)))
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (plus:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "immediate_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (mem:SI (match_dup 0)))]
++ "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
++ [(set (match_dup 0)
++ (plus:SI (match_dup 1)
++ (match_dup 2)))
++ (set (match_dup 3)
++ (mem:SI (plus:SI (match_dup 1)
++ (match_dup 2))))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; ld.sb r0, r7[-0x6]
++;; cashs.b r0
++;; to
++;; ld.sb r0, r7[-0x6]
++;;=============================================================================
++(define_peephole2
++ [(set (match_operand:QI 0 "register_operand" "")
++ (match_operand:QI 1 "load_sb_memory_operand" ""))
++ (set (match_operand:SI 2 "register_operand" "")
++ (sign_extend:SI (match_dup 0)))]
++ "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
++ [(set (match_dup 2)
++ (sign_extend:SI (match_dup 1)))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; ld.ub r0, r7[-0x6]
++;; cashu.b r0
++;; to
++;; ld.ub r0, r7[-0x6]
++;;=============================================================================
++(define_peephole2
++ [(set (match_operand:QI 0 "register_operand" "")
++ (match_operand:QI 1 "memory_operand" ""))
++ (set (match_operand:SI 2 "register_operand" "")
++ (zero_extend:SI (match_dup 0)))]
++ "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
++ [(set (match_dup 2)
++ (zero_extend:SI (match_dup 1)))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; ld.sh r0, r7[-0x6]
++;; casts.h r0
++;; to
++;; ld.sh r0, r7[-0x6]
++;;=============================================================================
++(define_peephole2
++ [(set (match_operand:HI 0 "register_operand" "")
++ (match_operand:HI 1 "memory_operand" ""))
++ (set (match_operand:SI 2 "register_operand" "")
++ (sign_extend:SI (match_dup 0)))]
++ "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
++ [(set (match_dup 2)
++ (sign_extend:SI (match_dup 1)))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; ld.uh r0, r7[-0x6]
++;; castu.h r0
++;; to
++;; ld.uh r0, r7[-0x6]
++;;=============================================================================
++(define_peephole2
++ [(set (match_operand:HI 0 "register_operand" "")
++ (match_operand:HI 1 "memory_operand" ""))
++ (set (match_operand:SI 2 "register_operand" "")
++ (zero_extend:SI (match_dup 0)))]
++ "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
++ [(set (match_dup 2)
++ (zero_extend:SI (match_dup 1)))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; mul rd, rx, ry
++;; add rd2, rd
++;; or
++;; add rd2, rd, rd2
++;; to
++;; mac rd2, rx, ry
++;;=============================================================================
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (plus:SI (match_dup 3)
++ (match_dup 0)))]
++ "peep2_reg_dead_p(2, operands[0])"
++ [(set (match_dup 3)
++ (plus:SI (mult:SI (match_dup 1)
++ (match_dup 2))
++ (match_dup 3)))]
++ "")
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (mult:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "register_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (plus:SI (match_dup 0)
++ (match_dup 3)))]
++ "peep2_reg_dead_p(2, operands[0])"
++ [(set (match_dup 3)
++ (plus:SI (mult:SI (match_dup 1)
++ (match_dup 2))
++ (match_dup 3)))]
++ "")
++
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Changing
++;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask
++;; to
++;; bld rs, k5
++;;
++;; If rd is dead after the operation.
++;;=============================================================================
++(define_peephole2
++ [ (set (match_operand:SI 0 "register_operand" "")
++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
++ (const_int 1)
++ (match_operand:SI 2 "immediate_operand" "")))
++ (set (cc0)
++ (match_dup 0))]
++ "peep2_reg_dead_p(2, operands[0])"
++ [(set (cc0)
++ (and:SI (match_dup 1)
++ (match_dup 2)))]
++ "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
++
++(define_peephole2
++ [ (set (match_operand:SI 0 "register_operand" "")
++ (and:SI (match_operand:SI 1 "register_operand" "")
++ (match_operand:SI 2 "one_bit_set_operand" "")))
++ (set (cc0)
++ (match_dup 0))]
++ "peep2_reg_dead_p(2, operands[0])"
++ [(set (cc0)
++ (and:SI (match_dup 1)
++ (match_dup 2)))]
++ "")
++
++;;=============================================================================
++;; Peephole optimizing
++;;-----------------------------------------------------------------------------
++;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2]
++;;
++;;=============================================================================
++
++
++(define_peephole
++ [(set (match_operand:SI 0 "register_operand" "")
++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
++ (const_int 8)
++ (match_operand:SI 2 "avr32_extract_shift_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
++ (match_operand:SI 4 "register_operand" ""))))]
++
++ "(dead_or_set_p(insn, operands[0]))"
++ {
++ switch ( INTVAL(operands[2]) ){
++ case 0:
++ return "ld.w %3, %4[%1:b << 2]";
++ case 8:
++ return "ld.w %3, %4[%1:l << 2]";
++ case 16:
++ return "ld.w %3, %4[%1:u << 2]";
++ case 24:
++ return "ld.w %3, %4[%1:t << 2]";
++ default:
++ internal_error("illegal operand for ldxi");
++ }
++ }
++ [(set_attr "type" "load")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")]
++ )
++
++
++
++(define_peephole
++ [(set (match_operand:SI 0 "register_operand" "")
++ (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
++ (set (match_operand:SI 2 "register_operand" "")
++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
++ (match_operand:SI 3 "register_operand" ""))))]
++
++ "(dead_or_set_p(insn, operands[0]))"
++
++ "ld.w %2, %3[%1:b << 2]"
++ [(set_attr "type" "load")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")]
++ )
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
++ (const_int 8)
++ (match_operand:SI 2 "avr32_extract_shift_operand" "")))
++ (set (match_operand:SI 3 "register_operand" "")
++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
++ (match_operand:SI 4 "register_operand" ""))))]
++
++ "(peep2_reg_dead_p(2, operands[0]))
++ || (REGNO(operands[0]) == REGNO(operands[3]))"
++ [(set (match_dup 3)
++ (mem:SI (plus:SI
++ (match_dup 4)
++ (mult:SI (zero_extract:SI (match_dup 1)
++ (const_int 8)
++ (match_dup 2))
++ (const_int 4)))))]
++ )
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
++ (set (match_operand:SI 2 "register_operand" "")
++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
++ (match_operand:SI 3 "register_operand" ""))))]
++
++ "(peep2_reg_dead_p(2, operands[0]))
++ || (REGNO(operands[0]) == REGNO(operands[2]))"
++ [(set (match_dup 2)
++ (mem:SI (plus:SI
++ (match_dup 3)
++ (mult:SI (zero_extract:SI (match_dup 1)
++ (const_int 8)
++ (const_int 0))
++ (const_int 4)))))]
++ "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
++ )
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (and:SI (match_operand:SI 1 "register_operand" "")
++ (const_int 255)))
++ (set (match_operand:SI 2 "register_operand" "")
++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
++ (match_operand:SI 3 "register_operand" ""))))]
++
++ "(peep2_reg_dead_p(2, operands[0]))
++ || (REGNO(operands[0]) == REGNO(operands[2]))"
++ [(set (match_dup 2)
++ (mem:SI (plus:SI
++ (match_dup 3)
++ (mult:SI (zero_extract:SI (match_dup 1)
++ (const_int 8)
++ (const_int 0))
++ (const_int 4)))))]
++ ""
++ )
++
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
++ (const_int 24)))
++ (set (match_operand:SI 2 "register_operand" "")
++ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
++ (match_operand:SI 3 "register_operand" ""))))]
++
++ "(peep2_reg_dead_p(2, operands[0]))
++ || (REGNO(operands[0]) == REGNO(operands[2]))"
++ [(set (match_dup 2)
++ (mem:SI (plus:SI
++ (match_dup 3)
++ (mult:SI (zero_extract:SI (match_dup 1)
++ (const_int 8)
++ (const_int 24))
++ (const_int 4)))))]
++ ""
++ )
++
++
++;;************************************************
++;; ANDN
++;;
++;;************************************************
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (not:SI (match_operand:SI 1 "register_operand" "")))
++ (set (match_operand:SI 2 "register_operand" "")
++ (and:SI (match_dup 2)
++ (match_dup 0)))]
++ "peep2_reg_dead_p(2, operands[0])"
++
++ [(set (match_dup 2)
++ (and:SI (match_dup 2)
++ (not:SI (match_dup 1))
++ ))]
++ ""
++)
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (not:SI (match_operand:SI 1 "register_operand" "")))
++ (set (match_operand:SI 2 "register_operand" "")
++ (and:SI (match_dup 0)
++ (match_dup 2)
++ ))]
++ "peep2_reg_dead_p(2, operands[0])"
++
++ [(set (match_dup 2)
++ (and:SI (match_dup 2)
++ (not:SI (match_dup 1))
++ ))]
++
++ ""
++)
++
++
++;;=================================================================
++;; Addabs peephole
++;;=================================================================
++
++(define_peephole
++ [(set (match_operand:SI 2 "register_operand" "=r")
++ (abs:SI (match_operand:SI 1 "register_operand" "r")))
++ (set (match_operand:SI 0 "register_operand" "=r")
++ (plus:SI (match_operand:SI 3 "register_operand" "r")
++ (match_dup 2)))]
++ "dead_or_set_p(insn, operands[2])"
++ "addabs %0, %3, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "set_z")])
++
++(define_peephole
++ [(set (match_operand:SI 2 "register_operand" "=r")
++ (abs:SI (match_operand:SI 1 "register_operand" "r")))
++ (set (match_operand:SI 0 "register_operand" "=r")
++ (plus:SI (match_dup 2)
++ (match_operand:SI 3 "register_operand" "r")))]
++ "dead_or_set_p(insn, operands[2])"
++ "addabs %0, %3, %1"
++ [(set_attr "length" "4")
++ (set_attr "cc" "set_z")])
++
++
++;;=================================================================
++;; Detect roundings
++;;=================================================================
++
++(define_insn "*round"
++ [(set (match_operand:SI 0 "register_operand" "+r")
++ (ashiftrt:SI (plus:SI (match_dup 0)
++ (match_operand:SI 1 "immediate_operand" "i"))
++ (match_operand:SI 2 "immediate_operand" "i")))]
++ "avr32_rnd_operands(operands[1], operands[2])"
++
++ "satrnds %0 >> %2, 31"
++
++ [(set_attr "type" "alu_sat")
++ (set_attr "length" "4")]
++
++ )
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (plus:SI (match_dup 0)
++ (match_operand:SI 1 "immediate_operand" "")))
++ (set (match_dup 0)
++ (ashiftrt:SI (match_dup 0)
++ (match_operand:SI 2 "immediate_operand" "")))]
++ "avr32_rnd_operands(operands[1], operands[2])"
++
++ [(set (match_dup 0)
++ (ashiftrt:SI (plus:SI (match_dup 0)
++ (match_dup 1))
++ (match_dup 2)))]
++ )
++
++(define_peephole
++ [(set (match_operand:SI 0 "register_operand" "r")
++ (plus:SI (match_dup 0)
++ (match_operand:SI 1 "immediate_operand" "i")))
++ (set (match_dup 0)
++ (ashiftrt:SI (match_dup 0)
++ (match_operand:SI 2 "immediate_operand" "i")))]
++ "avr32_rnd_operands(operands[1], operands[2])"
++
++ "satrnds %0 >> %2, 31"
++
++ [(set_attr "type" "alu_sat")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")]
++
++ )
++
++
++;;=================================================================
++;; mcall
++;;=================================================================
++(define_peephole
++ [(set (match_operand:SI 0 "register_operand" "")
++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
++ (parallel [(call (mem:SI (match_dup 0))
++ (match_operand 2 "" ""))
++ (clobber (reg:SI LR_REGNUM))])]
++ "dead_or_set_p(insn, operands[0])"
++ "mcall %1"
++ [(set_attr "type" "call")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")]
++)
++
++(define_peephole
++ [(set (match_operand:SI 2 "register_operand" "")
++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
++ (parallel [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_dup 2))
++ (match_operand 3 "" "")))
++ (clobber (reg:SI LR_REGNUM))])]
++ "dead_or_set_p(insn, operands[2])"
++ "mcall %1"
++ [(set_attr "type" "call")
++ (set_attr "length" "4")
++ (set_attr "cc" "call_set")]
++)
++
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
++ (parallel [(call (mem:SI (match_dup 0))
++ (match_operand 2 "" ""))
++ (clobber (reg:SI LR_REGNUM))])]
++ "peep2_reg_dead_p(2, operands[0])"
++ [(parallel [(call (mem:SI (match_dup 1))
++ (match_dup 2))
++ (clobber (reg:SI LR_REGNUM))])]
++ ""
++)
++
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand" "")
++ (match_operand 1 "avr32_const_pool_ref_operand" ""))
++ (parallel [(set (match_operand 2 "register_operand" "")
++ (call (mem:SI (match_dup 0))
++ (match_operand 3 "" "")))
++ (clobber (reg:SI LR_REGNUM))])]
++ "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
++ [(parallel [(set (match_dup 2)
++ (call (mem:SI (match_dup 1))
++ (match_dup 3)))
++ (clobber (reg:SI LR_REGNUM))])]
++ ""
++)
++
++;;=================================================================
++;; Returning a value
++;;=================================================================
++
++
++(define_peephole
++ [(set (match_operand 0 "register_operand" "")
++ (match_operand 1 "register_operand" ""))
++ (return)]
++ "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
++ && (REGNO(operands[1]) != LR_REGNUM)
++ && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
++ "retal %1"
++ [(set_attr "type" "call")
++ (set_attr "length" "2")]
++ )
++
++
++(define_peephole
++ [(set (match_operand 0 "register_operand" "r")
++ (match_operand 1 "immediate_operand" "i"))
++ (return)]
++ "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
++ ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
++ {
++ avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
++ return "";
++ }
++ [(set_attr "type" "call")
++ (set_attr "length" "4")]
++ )
++
++(define_peephole
++ [(set (match_operand 0 "register_operand" "r")
++ (match_operand 1 "immediate_operand" "i"))
++ (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
++ "(REGNO(operands[0]) == RETVAL_REGNUM) &&
++ ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
++ {
++ avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
++ return "";
++ }
++ ; Length is absolute worst case
++ [(set_attr "type" "branch")
++ (set_attr "length" "12")]
++ )
++
++(define_peephole
++ [(set (match_operand 0 "register_operand" "=r")
++ (if_then_else (match_operator 1 "avr32_comparison_operator"
++ [(match_operand 4 "register_operand" "r")
++ (match_operand 5 "register_immediate_operand" "rKs21")])
++ (match_operand 2 "avr32_cond_register_immediate_operand" "rKs08")
++ (match_operand 3 "avr32_cond_register_immediate_operand" "rKs08")))
++ (return)]
++ "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)"
++ {
++ operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
++
++ if ( GET_CODE(operands[2]) == REG
++ && GET_CODE(operands[3]) == REG
++ && REGNO(operands[2]) != LR_REGNUM
++ && REGNO(operands[3]) != LR_REGNUM ){
++ return "ret%1 %2\;ret%i1 %3";
++ } else if ( GET_CODE(operands[2]) == REG
++ && GET_CODE(operands[3]) == CONST_INT ){
++ if ( INTVAL(operands[3]) == -1
++ || INTVAL(operands[3]) == 0
++ || INTVAL(operands[3]) == 1 ){
++ return "ret%1 %2\;ret%i1 %d3";
++ } else {
++ return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
++ }
++ } else if ( GET_CODE(operands[2]) == CONST_INT
++ && GET_CODE(operands[3]) == REG ){
++ if ( INTVAL(operands[2]) == -1
++ || INTVAL(operands[2]) == 0
++ || INTVAL(operands[2]) == 1 ){
++ return "ret%1 %d2\;ret%i1 %3";
++ } else {
++ return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
++ }
++ } else {
++ if ( (INTVAL(operands[2]) == -1
++ || INTVAL(operands[2]) == 0
++ || INTVAL(operands[2]) == 1 )
++ && (INTVAL(operands[3]) == -1
++ || INTVAL(operands[3]) == 0
++ || INTVAL(operands[3]) == 1 )){
++ return "ret%1 %d2\;ret%i1 %d3";
++ } else {
++ return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
++ }
++ }
++ }
++
++ [(set_attr "length" "10")
++ (set_attr "cc" "none")
++ (set_attr "type" "call")])
++
++
++
++;;=================================================================
++;; mulnhh.w
++;;=================================================================
++
++(define_peephole2
++ [(set (match_operand:HI 0 "register_operand" "")
++ (neg:HI (match_operand:HI 1 "register_operand" "")))
++ (set (match_operand:SI 2 "register_operand" "")
++ (mult:SI
++ (sign_extend:SI (match_dup 0))
++ (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
++ [ (set (match_dup 2)
++ (mult:SI
++ (sign_extend:SI (neg:HI (match_dup 1)))
++ (sign_extend:SI (match_dup 3))))]
++ ""
++ )
++
++(define_peephole2
++ [(set (match_operand:HI 0 "register_operand" "")
++ (neg:HI (match_operand:HI 1 "register_operand" "")))
++ (set (match_operand:SI 2 "register_operand" "")
++ (mult:SI
++ (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
++ (sign_extend:SI (match_dup 0))))]
++ "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
++ [ (set (match_dup 2)
++ (mult:SI
++ (sign_extend:SI (neg:HI (match_dup 1)))
++ (sign_extend:SI (match_dup 3))))]
++ ""
++ )
++
++
++
++;;=================================================================
++;; Vector set and extract operations
++;;=================================================================
++(define_insn "vec_setv2hi_hi"
++ [(set (match_operand:V2HI 0 "register_operand" "=r")
++ (vec_merge:V2HI
++ (match_dup 0)
++ (vec_duplicate:V2HI
++ (match_operand:HI 1 "register_operand" "r"))
++ (const_int 1)))]
++ ""
++ "bfins\t%0, %1, 16, 16"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")])
++
++(define_insn "vec_setv2hi_lo"
++ [(set (match_operand:V2HI 0 "register_operand" "+r")
++ (vec_merge:V2HI
++ (match_dup 0)
++ (vec_duplicate:V2HI
++ (match_operand:HI 1 "register_operand" "r"))
++ (const_int 2)))]
++ ""
++ "bfins\t%0, %1, 0, 16"
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")])
++
++(define_expand "vec_setv2hi"
++ [(set (match_operand:V2HI 0 "register_operand" "")
++ (vec_merge:V2HI
++ (match_dup 0)
++ (vec_duplicate:V2HI
++ (match_operand:HI 1 "register_operand" ""))
++ (match_operand 2 "immediate_operand" "")))]
++ ""
++ { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
++ )
++
++(define_insn "vec_extractv2hi"
++ [(set (match_operand:HI 0 "register_operand" "=r")
++ (vec_select:HI
++ (match_operand:V2HI 1 "register_operand" "r")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
++ ""
++ {
++ if ( INTVAL(operands[2]) == 0 )
++ return "bfextu\t%0, %1, 16, 16";
++ else
++ return "bfextu\t%0, %1, 0, 16";
++ }
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")])
++
++(define_insn "vec_extractv4qi"
++ [(set (match_operand:QI 0 "register_operand" "=r")
++ (vec_select:QI
++ (match_operand:V4QI 1 "register_operand" "r")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
++ ""
++ {
++ switch ( INTVAL(operands[2]) ){
++ case 0:
++ return "bfextu\t%0, %1, 24, 8";
++ case 1:
++ return "bfextu\t%0, %1, 16, 8";
++ case 2:
++ return "bfextu\t%0, %1, 8, 8";
++ case 3:
++ return "bfextu\t%0, %1, 0, 8";
++ default:
++ abort();
++ }
++ }
++ [(set_attr "type" "alu")
++ (set_attr "length" "4")
++ (set_attr "cc" "clobber")])
++
++
++(define_insn "concatv2hi"
++ [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
++ (vec_concat:V2HI
++ (match_operand:HI 1 "register_operand" "r, r, 0")
++ (match_operand:HI 2 "register_operand" "r, 0, r")))]
++ ""
++ "@
++ mov\t%0, %1\;bfins\t%0, %2, 0, 16
++ bfins\t%0, %2, 0, 16
++ bfins\t%0, %1, 16, 16"
++ [(set_attr "length" "6, 4, 4")
++ (set_attr "type" "alu")])
++
++
++;; Load the atomic operation description
++(include "sync.md")
++
++;; Load the SIMD description
++(include "simd.md")
++
++;; Include the FPU for uc3
++(include "uc3fpu.md")
+--- /dev/null
++++ b/gcc/config/avr32/avr32-modes.def
+@@ -0,0 +1 @@
++VECTOR_MODES (INT, 4); /* V4QI V2HI */
+--- /dev/null
++++ b/gcc/config/avr32/avr32.opt
+@@ -0,0 +1,93 @@
++; Options for the ATMEL AVR32 port of the compiler.
++
++; Copyright 2007 Atmel Corporation.
++;
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 2, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++; WARRANTY; without even the implied warranty of MERCHANTABILITY or
++; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++; for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING. If not, write to the Free
++; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
++; 02110-1301, USA.
++
++muse-rodata-section
++Target Report Mask(USE_RODATA_SECTION)
++Use section .rodata for read-only data instead of .text.
++
++mhard-float
++Target Report Mask(HARD_FLOAT)
++Use FPU instructions instead of floating point emulation.
++
++msoft-float
++Target Report InverseMask(HARD_FLOAT, SOFT_FLOAT)
++Use floating point emulation for floating point operations.
++
++mforce-double-align
++Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
++Force double-word alignment for double-word memory accesses.
++
++mno-init-got
++Target Report RejectNegative Mask(NO_INIT_GOT)
++Do not initialize GOT register before using it when compiling PIC code.
++
++mrelax
++Target Report Mask(RELAX)
++Let invoked assembler and linker do relaxing (Enabled by default when optimization level is >1).
++
++mmd-reorg-opt
++Target Report Undocumented Mask(MD_REORG_OPTIMIZATION)
++Perform machine dependent optimizations in reorg stage.
++
++masm-addr-pseudos
++Target Report Mask(HAS_ASM_ADDR_PSEUDOS)
++Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default)
++
++mpart=
++Target Report RejectNegative Joined Var(avr32_part_name)
++Specify the AVR32 part name
++
++mcpu=
++Target Report RejectNegative Joined Undocumented Var(avr32_part_name)
++Specify the AVR32 part name (deprecated)
++
++march=
++Target Report RejectNegative Joined Var(avr32_arch_name)
++Specify the AVR32 architecture name
++
++mfast-float
++Target Report Mask(FAST_FLOAT)
++Enable fast floating-point library. Enabled by default if the -funsafe-math-optimizations switch is specified.
++
++mimm-in-const-pool
++Target Report Var(avr32_imm_in_const_pool) Init(-1)
++Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
++
++mno-pic
++Target Report RejectNegative Mask(NO_PIC)
++Do not generate position-independent code. (deprecated, use -fno-pic instead)
++
++mcond-exec-before-reload
++Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD)
++Enable experimental conditional execution preparation before the reload stage.
++
++mrmw-addressable-data
++Target Report Mask(RMW_ADDRESSABLE_DATA)
++Signal that all data is in range for the Atomic Read-Modify-Write memory instructions, and that
++gcc can safely generate these whenever possible.
++
++mflashvault
++Target Var(TARGET_FLASHVAULT)
++Generate code for flashvault
++
++mlist-devices
++Target RejectNegative Var(avr32_list_supported_parts)
++Print the list of parts supported while printing --target-help.
+--- /dev/null
++++ b/gcc/config/avr32/avr32-protos.h
+@@ -0,0 +1,196 @@
++/*
++ Prototypes for exported functions defined in avr32.c
++ Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
++
++ This file is part of GCC.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++
++#ifndef AVR32_PROTOS_H
++#define AVR32_PROTOS_H
++
++extern const int swap_reg[];
++
++extern int avr32_valid_macmac_bypass (rtx, rtx);
++extern int avr32_valid_mulmac_bypass (rtx, rtx);
++
++extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
++extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
++
++extern const char *avr32_strip_name_encoding (const char *);
++
++extern rtx avr32_get_note_reg_equiv (rtx insn);
++
++extern int avr32_use_return_insn (int iscond);
++
++extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
++
++extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
++extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
++extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
++
++extern void avr32_output_return_instruction (int single_ret_inst,
++ int iscond, rtx cond,
++ rtx r12_imm);
++extern void avr32_expand_prologue (void);
++extern void avr32_set_return_address (rtx source, rtx scratch);
++
++extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
++extern int avr32_extra_constraint_s (rtx value, const int strict);
++extern int avr32_eh_return_data_regno (const int n);
++extern int avr32_initial_elimination_offset (const int from, const int to);
++extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
++ tree type, int named);
++extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
++ rtx libname, tree fndecl);
++extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
++ enum machine_mode mode,
++ tree type, int named);
++#ifdef ARGS_SIZE_RTX
++/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */
++extern enum direction avr32_function_arg_padding (enum machine_mode mode,
++ tree type);
++#endif /* ARGS_SIZE_RTX */
++extern rtx avr32_function_value (tree valtype, tree func, bool outgoing);
++extern rtx avr32_libcall_value (enum machine_mode mode);
++extern int avr32_sched_use_dfa_pipeline_interface (void);
++extern bool avr32_return_in_memory (tree type, tree fntype);
++extern void avr32_regs_to_save (char *operand);
++extern void avr32_target_asm_function_prologue (FILE * file,
++ HOST_WIDE_INT size);
++extern void avr32_target_asm_function_epilogue (FILE * file,
++ HOST_WIDE_INT size);
++extern void avr32_trampoline_template (FILE * file);
++extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
++ rtx static_chain);
++extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
++ int strict);
++extern int avr32_legitimate_constant_p (rtx x);
++
++extern int avr32_legitimate_pic_operand_p (rtx x);
++
++extern rtx avr32_find_symbol (rtx x);
++extern void avr32_select_section (rtx exp, int reloc, int align);
++extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
++extern void avr32_asm_file_end (FILE * stream);
++extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
++extern void avr32_asm_output_common (FILE * stream, const char *name,
++ int size, int rounded);
++extern void avr32_asm_output_label (FILE * stream, const char *name);
++extern void avr32_asm_declare_object_name (FILE * stream, char *name,
++ tree decl);
++extern void avr32_asm_globalize_label (FILE * stream, const char *name);
++extern void avr32_asm_weaken_label (FILE * stream, const char *name);
++extern void avr32_asm_output_external (FILE * stream, tree decl,
++ const char *name);
++extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
++extern void avr32_asm_output_labelref (FILE * stream, const char *name);
++extern void avr32_notice_update_cc (rtx exp, rtx insn);
++extern void avr32_print_operand (FILE * stream, rtx x, int code);
++extern void avr32_print_operand_address (FILE * stream, rtx x);
++
++extern int avr32_symbol (rtx x);
++
++extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
++ unsigned HOST_WIDE_INT align);
++
++extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
++extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
++
++extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
++ const char *str);
++
++extern bool avr32_cannot_force_const_mem (rtx x);
++
++extern void avr32_init_builtins (void);
++
++extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
++ enum machine_mode mode, int ignore);
++
++extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
++
++extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
++
++extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
++ enum machine_mode mode,
++ tree type, bool named);
++
++extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
++ int write_back, int in_struct_p,
++ int scalar_p);
++extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
++ int in_struct_p, int scalar_p);
++extern int avr32_gen_movmemsi (rtx * operands);
++
++extern int avr32_rnd_operands (rtx add, rtx shift);
++extern int avr32_adjust_insn_length (rtx insn, int length);
++
++extern int symbol_mentioned_p (rtx x);
++extern int label_mentioned_p (rtx x);
++extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
++extern int avr32_address_register_rtx_p (rtx x, int strict_p);
++extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
++ int strict_p);
++
++extern int avr32_const_double_immediate (rtx value);
++extern void avr32_init_expanders (void);
++extern rtx avr32_return_addr (int count, rtx frame);
++extern bool avr32_got_mentioned_p (rtx addr);
++
++extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
++
++extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
++extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
++#ifdef RTX_CODE
++extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
++#endif
++
++extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
++extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
++extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
++extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
++extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
++ rtx op0, rtx op1);
++
++rtx get_next_insn_cond (rtx cur_insn);
++int set_next_insn_cond (rtx cur_insn, rtx cond);
++rtx next_insn_emits_cmp (rtx cur_insn);
++void avr32_override_options (void);
++void avr32_load_pic_register (void);
++#ifdef GCC_BASIC_BLOCK_H
++rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
++ int *num_true_changes);
++rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test );
++void avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes);
++#endif
++void avr32_optimization_options (int level, int size);
++int avr32_const_ok_for_move (HOST_WIDE_INT c);
++
++void avr32_split_const_expr (enum machine_mode mode,
++ enum machine_mode new_mode,
++ rtx expr,
++ rtx *split_expr);
++void avr32_get_intval (enum machine_mode mode,
++ rtx const_expr,
++ HOST_WIDE_INT *val);
++
++int avr32_cond_imm_clobber_splittable (rtx insn,
++ rtx operands[]);
++
++bool avr32_flashvault_call(tree decl);
++extern void avr32_emit_swdivsf (rtx, rtx, rtx);
++
++#endif /* AVR32_PROTOS_H */
+--- /dev/null
++++ b/gcc/config/avr32/crti.asm
+@@ -0,0 +1,64 @@
++/*
++ Init/fini stuff for AVR32.
++ Copyright 2003-2006 Atmel Corporation.
++
++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
++
++ This file is part of GCC.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++
++/* The code in sections .init and .fini is supposed to be a single
++ regular function. The function in .init is called directly from
++ start in crt1.asm. The function in .fini is atexit()ed in crt1.asm
++ too.
++
++ crti.asm contributes the prologue of a function to these sections,
++ and crtn.asm comes up the epilogue. STARTFILE_SPEC should list
++ crti.o before any other object files that might add code to .init
++ or .fini sections, and ENDFILE_SPEC should list crtn.o after any
++ such object files. */
++
++ .file "crti.asm"
++
++ .section ".init"
++/* Just load the GOT */
++ .align 2
++ .global _init
++_init:
++ stm --sp, r6, lr
++ lddpc r6, 1f
++0:
++ rsub r6, pc
++ rjmp 2f
++ .align 2
++1: .long 0b - _GLOBAL_OFFSET_TABLE_
++2:
++
++ .section ".fini"
++/* Just load the GOT */
++ .align 2
++ .global _fini
++_fini:
++ stm --sp, r6, lr
++ lddpc r6, 1f
++0:
++ rsub r6, pc
++ rjmp 2f
++ .align 2
++1: .long 0b - _GLOBAL_OFFSET_TABLE_
++2:
++
+--- /dev/null
++++ b/gcc/config/avr32/crtn.asm
+@@ -0,0 +1,44 @@
++/* Copyright (C) 2001 Free Software Foundation, Inc.
++ Written By Nick Clifton
++
++ This file is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by the
++ Free Software Foundation; either version 2, or (at your option) any
++ later version.
++
++ In addition to the permissions in the GNU General Public License, the
++ Free Software Foundation gives you unlimited permission to link the
++ compiled version of this file with other programs, and to distribute
++ those programs without any restriction coming from the use of this
++ file. (The General Public License restrictions do apply in other
++ respects; for example, they cover modification of the file, and
++ distribution when not linked into another program.)
++
++ This file is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; see the file COPYING. If not, write to
++ the Free Software Foundation, 59 Temple Place - Suite 330,
++ Boston, MA 02111-1307, USA.
++
++ As a special exception, if you link this library with files
++ compiled with GCC to produce an executable, this does not cause
++ the resulting executable to be covered by the GNU General Public License.
++ This exception does not however invalidate any other reasons why
++ the executable file might be covered by the GNU General Public License.
++*/
++
++
++
++
++ .file "crtn.asm"
++
++ .section ".init"
++ ldm sp++, r6, pc
++
++ .section ".fini"
++ ldm sp++, r6, pc
++
+--- /dev/null
++++ b/gcc/config/avr32/lib1funcs.S
+@@ -0,0 +1,2903 @@
++/* Macro for moving immediate value to register. */
++.macro mov_imm reg, imm
++.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm))
++ mov \reg, \imm
++#if __AVR32_UC__ >= 2
++.elseif ((\imm & 0xffff) == 0)
++ movh \reg, hi(\imm)
++
++#endif
++.else
++ mov \reg, lo(\imm)
++ orh \reg, hi(\imm)
++.endif
++.endm
++
++
++
++/* Adjust the unpacked double number if it is a subnormal number.
++ The exponent and mantissa pair are stored
++ in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
++ the MSB is passed in [sign]. Needs two scratch
++ registers [scratch1] and [scratch2]. An adjusted and packed double float
++ is present in [mant_hi,mant_lo] after macro has executed */
++.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2
++ /* We have an exponent which is <=0 indicating a subnormal number
++ As it should be stored as if the exponent was 1 (although the
++ exponent field is all zeros to indicate a subnormal number)
++ we have to shift down the mantissa to its correct position. */
++ neg \exp
++ sub \exp,-1 /* amount to shift down */
++ cp.w \exp,54
++ brlo 50f /* if more than 53 shift steps, the
++ entire mantissa will disappear
++ without any rounding to occur */
++ mov \mant_hi, 0
++ mov \mant_lo, 0
++ rjmp 52f
++50:
++ sub \exp,-10 /* do the shift to position the
++ mantissa at the same time
++ note! this does not include the
++ final 1 step shift to add the sign */
++
++ /* when shifting, save all shifted out bits in [scratch2]. we may need to
++ look at them to make correct rounding. */
++
++ rsub \scratch1,\exp,32 /* get inverted shift count */
++ cp.w \exp,32 /* handle shifts >= 32 separately */
++ brhs 51f
++
++ /* small (<32) shift amount, both words are part of the shift */
++ lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/
++ lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
++ lsr \mant_lo,\mant_lo,\exp /* shift down lsw */
++ lsr \mant_hi,\mant_hi,\exp /* shift down msw */
++ or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */
++ rjmp 50f
++
++ /* large (>=32) shift amount, only lsw will have bits left after shift.
++ note that shift operations will use ((shift count) mod 32) so
++ we do not need to subtract 32 from shift count. */
++51:
++ lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */
++ or \scratch2,\mant_lo /* also save all bits from lsw */
++ mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */
++ mov \mant_hi,0 /* clear msw */
++ lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */
++
++50:
++ /* result is almost ready to return, except that least significant bit
++ and the part we already shifted out may cause the result to be
++ rounded */
++ bld \mant_lo,0 /* get bit to be shifted out */
++ brcc 51f /* if bit was 0, no rounding */
++
++ /* msb of part to remove is 1, so rounding depends on rest of bits */
++ tst \scratch2,\scratch2 /* get shifted out tail */
++ brne 50f /* if rest > 0, do round */
++ bld \mant_lo,1 /* we have to look at lsb in result */
++ brcc 51f /* if lsb is 0, don't round */
++
++50:
++ /* subnormal result requires rounding
++ rounding may cause subnormal to become smallest normal number
++ luckily, smallest normal number has exactly the representation
++ we got by rippling a one bit up from mantissa into exponent field. */
++ sub \mant_lo,-1
++ subcc \mant_hi,-1
++
++51:
++ /* shift and return packed double with correct sign */
++ rol \sign
++ ror \mant_hi
++ ror \mant_lo
++52:
++.endm
++
++
++/* Adjust subnormal single float number with exponent [exp]
++ and mantissa [mant] and round. */
++.macro adjust_subnormal_sf sf, exp, mant, sign, scratch
++ /* subnormal number */
++ rsub \exp,\exp, 1 /* shift amount */
++ cp.w \exp, 25
++ movhs \mant, 0
++ brhs 90f /* Return zero */
++ rsub \scratch, \exp, 32
++ lsl \scratch, \mant,\scratch/* Check if there are any bits set
++ in the bits discarded in the mantissa */
++ srne \scratch /* If so set the lsb of the shifted mantissa */
++ lsr \mant,\mant,\exp /* Shift the mantissa */
++ or \mant, \scratch /* Round lsb if any bits were shifted out */
++ /* Rounding : For explaination, see round_sf. */
++ mov \scratch, 0x7f /* Set rounding constant */
++ bld \mant, 8
++ subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */
++ add \mant, \scratch /* Add rounding constant to mantissa */
++ /* We can't overflow because mantissa is at least shifted one position
++ to the right so the implicit bit is zero. We can however get the implicit
++ bit set after rounding which means that we have the lowest normal number
++ but this is ok since this bit has the same position as the LSB of the
++ exponent */
++ lsr \sf, \mant, 7
++ /* Rotate in sign */
++ lsl \sign, 1
++ ror \sf
++90:
++.endm
++
++
++/* Round the unpacked df number with exponent [exp] and
++ mantissa [mant_hi, mant_lo]. Uses scratch register
++ [scratch] */
++.macro round_df exp, mant_lo, mant_hi, scratch
++ mov \scratch, 0x3ff /* Rounding constant */
++ bld \mant_lo,11 /* Check if lsb in the final result is
++ set */
++ subeq \scratch, -1 /* Adjust rounding constant to 0x400
++ if rounding 0.5 upwards */
++ add \mant_lo, \scratch /* Round */
++ acr \mant_hi /* If overflowing we know that
++ we have all zeros in the bits not
++ scaled out so we can leave them
++ but we must increase the exponent with
++ two since we had an implicit bit
++ which is lost + the extra overflow bit */
++ subcs \exp, -2 /* Update exponent */
++.endm
++
++/* Round single float number stored in [mant] and [exp] */
++.macro round_sf exp, mant, scratch
++ /* Round:
++ For 0.5 we round to nearest even integer
++ for all other cases we round to nearest integer.
++ This means that if the digit left of the "point" (.)
++ is 1 we can add 0x80 to the mantissa since the
++ corner case 0x180 will round up to 0x200. If the
++ digit left of the "point" is 0 we will have to
++ add 0x7f since this will give 0xff and hence a
++ truncation/rounding downwards for the corner
++ case when the 9 lowest bits are 0x080 */
++ mov \scratch, 0x7f /* Set rounding constant */
++ /* Check if the mantissa is even or odd */
++ bld \mant, 8
++ subeq \scratch, -1 /* Rounding constant should be 0x80 */
++ add \mant, \scratch
++ subcs \exp, -2 /* Adjust exponent if we overflowed */
++.endm
++
++
++
++/* Pack a single float number stored in [mant] and [exp]
++ into a single float number in [sf] */
++.macro pack_sf sf, exp, mant
++ bld \mant,31 /* implicit bit to z */
++ subne \exp,1 /* if subnormal (implicit bit 0)
++ adjust exponent to storage format */
++
++ lsr \sf, \mant, 7
++ bfins \sf, \exp, 24, 8
++.endm
++
++/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
++ into [df_hi, df_lo]. [df_hi] is shifted
++ one bit up so the sign bit can be shifted into it */
++
++.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi
++ bld \mant_hi,31 /* implicit bit to z */
++ subne \exp,1 /* if subnormal (implicit bit 0)
++ adjust exponent to storage format */
++
++ lsr \mant_lo,11 /* shift back lsw */
++ or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */
++ lsl \mant_hi,1 /* get rid of implicit bit */
++ lsr \mant_hi,11 /* shift back msw except for one step*/
++ or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */
++.endm
++
++/* Normalize single float number stored in [mant] and [exp]
++ using scratch register [scratch] */
++.macro normalize_sf exp, mant, scratch
++ /* Adjust exponent and mantissa */
++ clz \scratch, \mant
++ sub \exp, \scratch
++ lsl \mant, \mant, \scratch
++.endm
++
++/* Normalize the exponent and mantissa pair stored
++ in [mant_hi,mant_lo] and [exp]. Needs two scratch
++ registers [scratch1] and [scratch2]. */
++.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2
++ clz \scratch1,\mant_hi /* Check if we have zeros in high bits */
++ breq 80f /* No need for scaling if no zeros in high bits */
++ brcs 81f /* Check for all zeros */
++
++ /* shift amount is smaller than 32, and involves both msw and lsw*/
++ rsub \scratch2,\scratch1,32 /* shift mantissa */
++ lsl \mant_hi,\mant_hi,\scratch1
++ lsr \scratch2,\mant_lo,\scratch2
++ or \mant_hi,\scratch2
++ lsl \mant_lo,\mant_lo,\scratch1
++ sub \exp,\scratch1 /* adjust exponent */
++ rjmp 80f /* Finished */
++81:
++ /* shift amount is greater than 32 */
++ clz \scratch1,\mant_lo /* shift mantissa */
++ movcs \scratch1, 0
++ subcc \scratch1,-32
++ lsl \mant_hi,\mant_lo,\scratch1
++ mov \mant_lo,0
++ sub \exp,\scratch1 /* adjust exponent */
++80:
++.endm
++
++
++/* Fast but approximate multiply of two 64-bit numbers to give a 64 bit result.
++ The multiplication of [al]x[bl] is discarded.
++ Operands in [ah], [al], [bh], [bl].
++ Scratch registers in [sh], [sl].
++ Returns results in registers [rh], [rl].*/
++.macro mul_approx_df ah, al, bh, bl, rh, rl, sh, sl
++ mulu.d \sl, \ah, \bl
++ macu.d \sl, \al, \bh
++ mulu.d \rl, \ah, \bh
++ add \rl, \sh
++ acr \rh
++.endm
++
++
++
++#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast)
++ .align 2
++#if defined(L_avr32_f64_mul)
++ .global __avr32_f64_mul
++ .type __avr32_f64_mul,@function
++__avr32_f64_mul:
++#else
++ .global __avr32_f64_mul_fast
++ .type __avr32_f64_mul_fast,@function
++__avr32_f64_mul_fast:
++#endif
++ or r12, r10, r11 << 1
++ breq __avr32_f64_mul_op1_zero
++
++#if defined(L_avr32_f64_mul)
++ pushm r4-r7, lr
++#else
++ stm --sp, r5,r6,r7,lr
++#endif
++
++#define AVR32_F64_MUL_OP1_INT_BITS 1
++#define AVR32_F64_MUL_OP2_INT_BITS 10
++#define AVR32_F64_MUL_RES_INT_BITS 11
++
++ /* op1 in {r11,r10}*/
++ /* op2 in {r9,r8}*/
++ eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
++
++ /* Unpack op1 to 1.63 format*/
++ /* exp: r7 */
++ /* sf: r11, r10 */
++ bfextu r7, r11, 20, 11 /* Extract exponent */
++
++ mov r5, 1
++
++ /* Check if normalization is needed */
++ breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */
++
++ lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */
++ or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1))
++ lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1)
++ bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */
++
++
++22:
++ /* Unpack op2 to 10.54 format */
++ /* exp: r6 */
++ /* sf: r9, r8 */
++ bfextu r6, r9, 20, 11 /* Extract exponent */
++
++ /* Check if normalization is needed */
++ breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */
++
++ lsl r8, 1 /* Extract mantissa, leave room for implicit bit */
++ rol r9
++ bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */
++
++23:
++
++ /* Check if any operands are NaN or INF */
++ cp r7, 0x7ff
++ breq __avr32_f64_mul_op_nan_or_inf /* Check op1 for NaN or Inf */
++ cp r6, 0x7ff
++ breq __avr32_f64_mul_op_nan_or_inf /* Check op2 for NaN or Inf */
++
++
++ /* Calculate new exponent in r12*/
++ add r12, r7, r6
++ sub r12, (1023-1)
++
++#if defined(L_avr32_f64_mul)
++ /* Do the multiplication.
++ Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */
++ mulu.d r4, r11, r8
++ macu.d r4, r10, r9
++ mulu.d r6, r10, r8
++ mulu.d r10, r11, r9
++ add r7, r4
++ adc r10, r10, r5
++ acr r11
++#else
++ /* Do the multiplication using approximate calculation. discard the al x bl
++ calculation.
++ Place result in [r11, r10, r7]. The result is in 11.85 format. */
++
++ /* Do the multiplication using approximate calculation.
++ Place result in r11, r10. Use r7, r6 as scratch registers */
++ mulu.d r6, r11, r8
++ macu.d r6, r10, r9
++ mulu.d r10, r11, r9
++ add r10, r7
++ acr r11
++#endif
++ /* Adjust exponent and mantissa */
++ /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */
++ /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */
++ /* In the first case, shift one pos to left.*/
++ bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1
++ breq 0f
++ lsl r7, 1
++ rol r10
++ rol r11
++ sub r12, 1
++0:
++ cp r12, 0
++ brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/
++
++ /* Check for Inf. */
++ cp.w r12, 0x7ff
++ brge __avr32_f64_mul_res_inf
++
++ /* Insert exponent. */
++ bfins r11, r12, 20, 11
++
++ /* Result was not subnormal. Perform rounding. */
++ /* For the fast version we discard the sticky bits and always round
++ the halfwaycase up. */
++24:
++#if defined(L_avr32_f64_mul)
++ or r6, r6, r10 << 31 /* Or in parity bit into stickybits */
++ or r7, r7, r6 >> 1 /* Or together sticky and still make the msb
++ of r7 represent the halfway bit. */
++ eorh r7, 0x8000 /* Toggle halfway bit. */
++ /* We should now round up by adding one for the following cases:
++
++ halfway sticky|parity round-up
++ 0 x no
++ 1 0 no
++ 1 1 yes
++
++ Since we have inverted the halfway bit we can use the satu instruction
++ by saturating to 1 bit to implement this.
++ */
++ satu r7 >> 0, 1
++#else
++ lsr r7, 31
++#endif
++ add r10, r7
++ acr r11
++
++ /* Insert sign bit*/
++ bld lr, 31
++ bst r11, 31
++
++ /* Return result in [r11,r10] */
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
++ ldm sp++, r5, r6, r7,pc
++#endif
++
++
++__avr32_f64_mul_op1_subnormal:
++ andh r11, 0x000f /* Remove sign bit and exponent */
++ clz r12, r10 /* Count leading zeros in lsw */
++ clz r6, r11 /* Count leading zeros in msw */
++ subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS
++ movcs r6, r12
++ subcc r6, AVR32_F64_MUL_OP1_INT_BITS
++ cp.w r6, 32
++ brge 0f
++
++ /* shifting involves both msw and lsw*/
++ rsub r12, r6, 32 /* shift mantissa */
++ lsl r11, r11, r6
++ lsr r12, r10, r12
++ or r11, r12
++ lsl r10, r10, r6
++ sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
++ sub r7, r6 /* adjust exponent */
++ rjmp 22b /* Finished */
++0:
++ /* msw is zero so only need to consider lsw */
++ lsl r11, r10, r6
++ breq __avr32_f64_mul_res_zero
++ mov r10, 0
++ sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
++ sub r7, r6 /* adjust exponent */
++ rjmp 22b
++
++
++__avr32_f64_mul_op2_subnormal:
++ andh r9, 0x000f /* Remove sign bit and exponent */
++ clz r12, r8 /* Count leading zeros in lsw */
++ clz r5, r9 /* Count leading zeros in msw */
++ subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS
++ movcs r5, r12
++ subcc r5, AVR32_F64_MUL_OP2_INT_BITS
++ cp.w r5, 32
++ brge 0f
++
++ /* shifting involves both msw and lsw*/
++ rsub r12, r5, 32 /* shift mantissa */
++ lsl r9, r9, r5
++ lsr r12, r8, r12
++ or r9, r12
++ lsl r8, r8, r5
++ sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
++ sub r6, r5 /* adjust exponent */
++ rjmp 23b /* Finished */
++0:
++ /* msw is zero so only need to consider lsw */
++ lsl r9, r8, r5
++ breq __avr32_f64_mul_res_zero
++ mov r8, 0
++ sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
++ sub r6, r5 /* adjust exponent */
++ rjmp 23b
++
++
++__avr32_f64_mul_op_nan_or_inf:
++ /* Same code for OP1 and OP2*/
++ /* Since we are here, at least one of the OPs were NaN or INF*/
++ andh r9, 0x000f /* Remove sign bit and exponent */
++ andh r11, 0x000f /* Remove sign bit and exponent */
++ /* Merge the regs in each operand to check for zero*/
++ or r11, r10 /* op1 */
++ or r9, r8 /* op2 */
++ /* Check if op1 is NaN or INF */
++ cp r7, 0x7ff
++ brne __avr32_f64_mul_op1_not_naninf
++ /* op1 was NaN or INF.*/
++ cp r11, 0
++ brne __avr32_f64_mul_res_nan /* op1 was NaN. Result will be NaN*/
++ /*op1 was INF. check if op2 is NaN or INF*/
++ cp r6, 0x7ff
++ brne __avr32_f64_mul_res_inf /*op1 was INF, op2 was neither NaN nor INF*/
++ /* op1 is INF, op2 is either NaN or INF*/
++ cp r9, 0
++ breq __avr32_f64_mul_res_inf /*op2 was also INF*/
++ rjmp __avr32_f64_mul_res_nan /*op2 was NaN*/
++
++__avr32_f64_mul_op1_not_naninf:
++ /* op1 was not NaN nor INF. Then op2 must be NaN or INF*/
++ cp r9, 0
++ breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/
++ rjmp __avr32_f64_mul_res_nan /*else return NaN*/
++
++__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */
++#if defined(L_avr32_f64_mul)
++ /* Check how much we must scale down the mantissa. */
++ neg r12
++ sub r12, -1 /* We do no longer have an implicit bit. */
++ satu r12 >> 0, 6 /* Saturate shift amount to max 63. */
++ cp.w r12, 32
++ brge 0f
++ /* Shift amount <32 */
++ rsub r8, r12, 32
++ or r6, r7
++ lsr r7, r7, r12
++ lsl r9, r10, r8
++ or r7, r9
++ lsr r10, r10, r12
++ lsl r9, r11, r8
++ or r10, r9
++ lsr r11, r11, r12
++ rjmp 24b
++0:
++ /* Shift amount >=32 */
++ rsub r8, r12, 32
++ moveq r9, 0
++ breq 0f
++ lsl r9, r11, r8
++0:
++ or r6, r7
++ or r6, r6, r10 << 1
++ lsr r10, r10, r12
++ or r7, r9, r10
++ lsr r10, r11, r12
++ mov r11, 0
++ rjmp 24b
++#else
++ /* Flush to zero for the fast version. */
++ mov r11, lr /*Get correct sign*/
++ andh r11, 0x8000, COH
++ mov r10, 0
++ ldm sp++, r5, r6, r7,pc
++#endif
++
++__avr32_f64_mul_res_zero:/* Multiply result is zero. */
++ mov r11, lr /*Get correct sign*/
++ andh r11, 0x8000, COH
++ mov r10, 0
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
++ ldm sp++, r5, r6, r7,pc
++#endif
++
++__avr32_f64_mul_res_nan: /* Return NaN. */
++ mov r11, -1
++ mov r10, -1
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
++ ldm sp++, r5, r6, r7,pc
++#endif
++
++__avr32_f64_mul_res_inf: /* Return INF. */
++ mov r11, 0xfff00000
++ bld lr, 31
++ bst r11, 31
++ mov r10, 0
++#if defined(L_avr32_f64_mul)
++ popm r4-r7, pc
++#else
++ ldm sp++, r5, r6, r7,pc
++#endif
++
++__avr32_f64_mul_op1_zero:
++ /* Get sign */
++ eor r11, r11, r9
++ andh r11, 0x8000, COH
++ /* Check if op2 is Inf or NaN. */
++ bfextu r12, r9, 20, 11
++ cp.w r12, 0x7ff
++ retne r12 /* Return 0.0 */
++ /* Return NaN */
++ mov r10, -1
++ mov r11, -1
++ ret r12
++
++
++
++#endif
++
++
++#if defined(L_avr32_f64_addsub) || defined(L_avr32_f64_addsub_fast)
++ .align 2
++
++__avr32_f64_sub_from_add:
++ /* Switch sign on op2 */
++ eorh r9, 0x8000
++
++#if defined(L_avr32_f64_addsub_fast)
++ .global __avr32_f64_sub_fast
++ .type __avr32_f64_sub_fast,@function
++__avr32_f64_sub_fast:
++#else
++ .global __avr32_f64_sub
++ .type __avr32_f64_sub,@function
++__avr32_f64_sub:
++#endif
++
++ /* op1 in {r11,r10}*/
++ /* op2 in {r9,r8}*/
++
++#if defined(L_avr32_f64_addsub_fast)
++ /* If op2 is zero just return op1 */
++ or r12, r8, r9 << 1
++ reteq r12
++#endif
++
++ /* Check signs */
++ eor r12, r11, r9
++ /* Different signs, use addition. */
++ brmi __avr32_f64_add_from_sub
++
++ stm --sp, r5, r6, r7, lr
++
++ /* Get sign of op1 into r12 */
++ mov r12, r11
++ andh r12, 0x8000, COH
++
++ /* Remove sign from operands */
++ cbr r11, 31
++ cbr r9, 31
++
++ /* Put the largest number in [r11, r10]
++ and the smallest number in [r9, r8] */
++ cp r10, r8
++ cpc r11, r9
++ brhs 1f /* Skip swap if operands already correctly ordered*/
++ /* Operands were not correctly ordered, swap them*/
++ mov r7, r11
++ mov r11, r9
++ mov r9, r7
++ mov r7, r10
++ mov r10, r8
++ mov r8, r7
++ eorh r12, 0x8000 /* Invert sign in r12*/
++1:
++ /* Unpack largest operand - opH */
++ /* exp: r7 */
++ /* sf: r11, r10 */
++ lsr r7, r11, 20 /* Extract exponent */
++ lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
++ or r11, r11, r10>>21
++ lsl r10, 11
++ sbr r11, 31 /* Insert implicit bit */
++
++
++ /* Unpack smallest operand - opL */
++ /* exp: r6 */
++ /* sf: r9, r8 */
++ lsr r6, r9, 20 /* Extract exponent */
++ breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */
++ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
++ or r9, r9, r8>>21
++ lsl r8, 11
++ sbr r9, 31 /* Insert implicit bit */
++
++
++__avr32_f64_sub_opL_subnormal_done:
++ /* opH is NaN or Inf. */
++ cp.w r7, 0x7ff
++ breq __avr32_f64_sub_opH_nan_or_inf
++
++ /* Get shift amount to scale mantissa of op2. */
++ rsub r6, r7
++ breq __avr32_f64_sub_shift_done /* No need to shift, exponents are equal*/
++
++ /* Scale mantissa [r9, r8] with amount [r6].
++ Uses scratch registers [r5] and [lr].
++ In IEEE mode:Must not forget the sticky bits we intend to shift out. */
++
++ rsub r5,r6,32 /* get (32 - shift count)
++ (if shift count > 32 we get a
++ negative value, but that will
++ work as well in the code below.) */
++
++ cp.w r6,32 /* handle shifts >= 32 separately */
++ brhs __avr32_f64_sub_longshift
++
++ /* small (<32) shift amount, both words are part of the shift
++ first remember whether part that is lost contains any 1 bits ... */
++ lsl lr,r8,r5 /* shift away bits that are part of
++ final mantissa. only part that goes
++ to lr are bits that will be lost */
++
++ /* ... and now to the actual shift */
++ lsl r5,r9,r5 /* get bits from msw destined for lsw*/
++ lsr r8,r8,r6 /* shift down lsw of mantissa */
++ lsr r9,r9,r6 /* shift down msw of mantissa */
++ or r8,r5 /* combine these bits with prepared lsw*/
++#if defined(L_avr32_f64_addsub)
++ cp.w lr,0 /* if any '1' bit in part we lost ...*/
++ srne lr
++ or r8, lr /* ... we need to set sticky bit*/
++#endif
++
++__avr32_f64_sub_shift_done:
++ /* Now subtract the mantissas. */
++ sub r10, r8
++ sbc r11, r11, r9
++
++ /* Normalize the exponent and mantissa pair stored in
++ [r11,r10] and exponent in [r7]. Needs two scratch registers [r6] and [lr]. */
++ clz r6,r11 /* Check if we have zeros in high bits */
++ breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */
++ brcs __avr32_f64_sub_longnormalize
++
++
++ /* shift amount is smaller than 32, and involves both msw and lsw*/
++ rsub lr,r6,32 /* shift mantissa */
++ lsl r11,r11,r6
++ lsr lr,r10,lr
++ or r11,lr
++ lsl r10,r10,r6
++
++ sub r7,r6 /* adjust exponent */
++ brle __avr32_f64_sub_subnormal_result
++__avr32_f64_sub_longnormalize_done:
++
++#if defined(L_avr32_f64_addsub)
++ /* Insert the bits we will remove from the mantissa r9[31:21] */
++ lsl r9, r10, (32 - 11)
++#else
++ /* Keep the last bit shifted out. */
++ bfextu r9, r10, 10, 1
++#endif
++
++ /* Pack final result*/
++ /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
++ /* Result in [r11,r10] */
++ /* Insert mantissa */
++ lsr r10, 11
++ or r10, r10, r11<<21
++ lsr r11, 11
++ /* Insert exponent and sign bit*/
++ bfins r11, r7, 20, 11
++ or r11, r12
++
++ /* Round */
++__avr32_f64_sub_round:
++#if defined(L_avr32_f64_addsub)
++ mov_imm r7, 0x80000000
++ bld r10, 0
++ subne r7, -1
++
++ cp.w r9, r7
++ srhs r9
++#endif
++ add r10, r9
++ acr r11
++
++ /* Return result in [r11,r10] */
++ ldm sp++, r5, r6, r7,pc
++
++
++
++__avr32_f64_sub_opL_subnormal:
++ /* Extract the of mantissa */
++ lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
++ or r9, r9, r8>>21
++ lsl r8, 11
++
++ /* Set exponent to 1 if we do not have a zero. */
++ or lr, r9, r8
++ movne r6,1
++
++ /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
++ rsub lr, r7, 0
++ moveq r7,1
++ bst r11, 31
++
++ /* Check if op1 is zero, if so set exponent to 0. */
++ or lr, r11, r10
++ moveq r7,0
++
++ rjmp __avr32_f64_sub_opL_subnormal_done
++
++__avr32_f64_sub_opH_nan_or_inf:
++ /* Check if opH is NaN, if so return NaN */
++ cbr r11, 31
++ or lr, r11, r10
++ brne __avr32_f64_sub_return_nan
++
++ /* opH is Inf. */
++ /* Check if opL is Inf. or NaN */
++ cp.w r6, 0x7ff
++ breq __avr32_f64_sub_return_nan
++ /* Return infinity with correct sign. */
++ or r11, r12, r7 << 20
++ ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
++__avr32_f64_sub_return_nan:
++ mov r10, -1 /* Generate NaN in r11, r10 */
++ mov r11, -1
++ ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
++
++
++__avr32_f64_sub_subnormal_result:
++#if defined(L_avr32_f64_addsub)
++ /* Check how much we must scale down the mantissa. */
++ neg r7
++ sub r7, -1 /* We do no longer have an implicit bit. */
++ satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
++ cp.w r7, 32
++ brge 0f
++ /* Shift amount <32 */
++ rsub r8, r7, 32
++ lsl r9, r10, r8
++ srne r6
++ lsr r10, r10, r7
++ or r10, r6 /* Sticky bit from the
++ part that was shifted out. */
++ lsl r9, r11, r8
++ or r10, r10, r9
++ lsr r11, r10, r7
++ /* Set exponent */
++ mov r7, 0
++ rjmp __avr32_f64_sub_longnormalize_done
++0:
++ /* Shift amount >=32 */
++ rsub r8, r7, 64
++ lsl r9, r11, r8
++ or r9, r10
++ srne r6
++ lsr r10, r11, r7
++ or r10, r6 /* Sticky bit from the
++ part that was shifted out. */
++ mov r11, 0
++ /* Set exponent */
++ mov r7, 0
++ rjmp __avr32_f64_sub_longnormalize_done
++#else
++ /* Just flush subnormals to zero. */
++ mov r10, 0
++ mov r11, 0
++#endif
++ ldm sp++, r5, r6, r7, pc
++
++__avr32_f64_sub_longshift:
++ /* large (>=32) shift amount, only lsw will have bits left after shift.
++ note that shift operations will use ((shift count=r6) mod 32) so
++ we do not need to subtract 32 from shift count. */
++ /* Saturate the shift amount to 63. If the amount
++ is any larger op2 is insignificant. */
++ satu r6 >> 0, 6
++
++#if defined(L_avr32_f64_addsub)
++ /* first remember whether part that is lost contains any 1 bits ... */
++ moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */
++ breq 0f
++ lsl lr,r9,r5 /* save all lost bits from msw */
++ or lr,r8 /* also save lost bits (all) from lsw
++ now lr != 0 if we lose any bits */
++#endif
++0:
++ /* ... and now to the actual shift */
++ lsr r8,r9,r6 /* Move msw to lsw and shift. */
++ mov r9,0 /* clear msw */
++#if defined(L_avr32_f64_addsub)
++ cp.w lr,0 /* if any '1' bit in part we lost ...*/
++ srne lr
++ or r8, lr /* ... we need to set sticky bit*/
++#endif
++ rjmp __avr32_f64_sub_shift_done
++
++__avr32_f64_sub_longnormalize:
++ /* shift amount is greater than 32 */
++ clz r6,r10 /* shift mantissa */
++ /* If the resulting mantissa is zero the result is
++ zero so force exponent to zero. */
++ movcs r7, 0
++ movcs r6, 0
++ movcs r12, 0 /* Also clear sign bit. A zero result from subtraction
++ always is +0.0 */
++ subcc r6,-32
++ lsl r11,r10,r6
++ mov r10,0
++ sub r7,r6 /* adjust exponent */
++ brle __avr32_f64_sub_subnormal_result
++ rjmp __avr32_f64_sub_longnormalize_done
++
++
++
++ .align 2
++__avr32_f64_add_from_sub:
++ /* Switch sign on op2 */
++ eorh r9, 0x8000
++
++#if defined(L_avr32_f64_addsub_fast)
++ .global __avr32_f64_add_fast
++ .type __avr32_f64_add_fast,@function
++__avr32_f64_add_fast:
++#else
++ .global __avr32_f64_add
++ .type __avr32_f64_add,@function
++__avr32_f64_add:
++#endif
++
++ /* op1 in {r11,r10}*/
++ /* op2 in {r9,r8}*/
++
++#if defined(L_avr32_f64_addsub_fast)
++ /* If op2 is zero just return op1 */
++ or r12, r8, r9 << 1
++ reteq r12
++#endif
++
++ /* Check signs */
++ eor r12, r11, r9
++ /* Different signs, use subtraction. */
++ brmi __avr32_f64_sub_from_add
++
++ stm --sp, r5, r6, r7, lr
++
++ /* Get sign of op1 into r12 */
++ mov r12, r11
++ andh r12, 0x8000, COH
++
++ /* Remove sign from operands */
++ cbr r11, 31
++ cbr r9, 31
++
++ /* Put the number with the largest exponent in [r11, r10]
++ and the number with the smallest exponent in [r9, r8] */
++ cp r11, r9
++ brhs 1f /* Skip swap if operands already correctly ordered */
++ /* Operands were not correctly ordered, swap them */
++ mov r7, r11
++ mov r11, r9
++ mov r9, r7
++ mov r7, r10
++ mov r10, r8
++ mov r8, r7
++1:
++ mov lr, 0 /* Set sticky bits to zero */
++ /* Unpack largest operand - opH */
++ /* exp: r7 */
++ /* sf: r11, r10 */
++ bfextu R7, R11, 20, 11 /* Extract exponent */
++ bfextu r11, r11, 0, 20 /* Extract mantissa */
++ sbr r11, 20 /* Insert implicit bit */
++
++ /* Unpack smallest operand - opL */
++ /* exp: r6 */
++ /* sf: r9, r8 */
++ bfextu R6, R9, 20, 11 /* Extract exponent */
++ breq __avr32_f64_add_op2_subnormal
++ bfextu r9, r9, 0, 20 /* Extract mantissa */
++ sbr r9, 20 /* Insert implicit bit */
++
++2:
++ /* opH is NaN or Inf. */
++ cp.w r7, 0x7ff
++ breq __avr32_f64_add_opH_nan_or_inf
++
++ /* Get shift amount to scale mantissa of op2. */
++ rsub r6, r7
++ breq __avr32_f64_add_shift_done /* No need to shift, exponents are equal*/
++
++ /* Scale mantissa [r9, r8] with amount [r6].
++ Uses scratch registers [r5] and [lr].
++ In IEEE mode:Must not forget the sticky bits we intend to shift out. */
++ rsub r5,r6,32 /* get (32 - shift count)
++ (if shift count > 32 we get a
++ negative value, but that will
++ work as well in the code below.) */
++
++ cp.w r6,32 /* handle shifts >= 32 separately */
++ brhs __avr32_f64_add_longshift
++
++ /* small (<32) shift amount, both words are part of the shift
++ first remember whether part that is lost contains any 1 bits ... */
++ lsl lr,r8,r5 /* shift away bits that are part of
++ final mantissa. only part that goes
++ to lr are bits that will be lost */
++
++ /* ... and now to the actual shift */
++ lsl r5,r9,r5 /* get bits from msw destined for lsw*/
++ lsr r8,r8,r6 /* shift down lsw of mantissa */
++ lsr r9,r9,r6 /* shift down msw of mantissa */
++ or r8,r5 /* combine these bits with prepared lsw*/
++
++__avr32_f64_add_shift_done:
++ /* Now add the mantissas. */
++ add r10, r8
++ adc r11, r11, r9
++
++ /* Check if we overflowed. */
++ bld r11, 21
++ breq __avr32_f64_add_res_of:
++
++__avr32_f64_add_res_of_done:
++
++ /* Pack final result*/
++ /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
++ /* Result in [r11,r10] */
++ /* Insert exponent and sign bit*/
++ bfins r11, r7, 20, 11
++ or r11, r12
++
++ /* Round */
++__avr32_f64_add_round:
++#if defined(L_avr32_f64_addsub)
++ bfextu r12, r10, 0, 1 /* Extract parity bit.*/
++ or lr, r12 /* or it together with the sticky bits. */
++ eorh lr, 0x8000 /* Toggle round bit. */
++ /* We should now round up by adding one for the following cases:
++
++ halfway sticky|parity round-up
++ 0 x no
++ 1 0 no
++ 1 1 yes
++
++ Since we have inverted the halfway bit we can use the satu instruction
++ by saturating to 1 bit to implement this.
++ */
++ satu lr >> 0, 1
++#else
++ lsr lr, 31
++#endif
++ add r10, lr
++ acr r11
++
++ /* Return result in [r11,r10] */
++ ldm sp++, r5, r6, r7,pc
++
++
++__avr32_f64_add_opH_nan_or_inf:
++ /* Check if opH is NaN, if so return NaN */
++ cbr r11, 20
++ or lr, r11, r10
++ brne __avr32_f64_add_return_nan
++
++ /* opH is Inf. */
++ /* Check if opL is Inf. or NaN */
++ cp.w r6, 0x7ff
++ breq __avr32_f64_add_opL_nan_or_inf
++ ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
++__avr32_f64_add_opL_nan_or_inf:
++ cbr r9, 20
++ or lr, r9, r8
++ brne __avr32_f64_add_return_nan
++ mov r10, 0 /* Generate Inf in r11, r10 */
++ mov_imm r11, 0x7ff00000
++ or r11, r12 /* Put sign bit back */
++ ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
++__avr32_f64_add_return_nan:
++ mov r10, -1 /* Generate NaN in r11, r10 */
++ mov r11, -1
++ ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
++
++
++__avr32_f64_add_longshift:
++ /* large (>=32) shift amount, only lsw will have bits left after shift.
++ note that shift operations will use ((shift count=r6) mod 32) so
++ we do not need to subtract 32 from shift count. */
++ /* Saturate the shift amount to 63. If the amount
++ is any larger op2 is insignificant. */
++ satu r6 >> 0, 6
++ /* If shift amount is 32 there are no bits from the msw that are lost. */
++ moveq lr, r8
++ breq 0f
++ /* first remember whether part that is lost contains any 1 bits ... */
++ lsl lr,r9,r5 /* save all lost bits from msw */
++#if defined(L_avr32_f64_addsub)
++ cp.w r8, 0
++ srne r8
++ or lr,r8 /* also save lost bits (all) from lsw
++ now lr != 0 if we lose any bits */
++#endif
++0:
++ /* ... and now to the actual shift */
++ lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/
++ mov r9,0 /* clear msw */
++ rjmp __avr32_f64_add_shift_done
++
++__avr32_f64_add_res_of:
++ /* We overflowed. Scale down mantissa by shifting right one position. */
++ or lr, lr, lr << 1 /* Remember stickybits*/
++ lsr r11, 1
++ ror r10
++ ror lr
++ sub r7, -1 /* Increment exponent */
++
++ /* Clear mantissa to set result to Inf if the exponent is 255. */
++ cp.w r7, 0x7ff
++ moveq r10, 0
++ moveq r11, 0
++ moveq lr, 0
++ rjmp __avr32_f64_add_res_of_done
++
++__avr32_f64_add_op2_subnormal:
++ /* Set epxponent to 1 */
++ mov r6, 1
++
++ /* Check if op2 is also subnormal. */
++ cp.w r7, 0
++ brne 2b
++
++ cbr r11, 20
++ /* Both operands are subnormal. Just addd the mantissas
++ and the exponent will automatically be set to 1 if
++ we overflow into a normal number. */
++ add r10, r8
++ adc r11, r11, r9
++
++ /* Add sign bit */
++ or r11, r12
++
++ /* Return result in [r11,r10] */
++ ldm sp++, r5, r6, r7,pc
++
++
++
++#endif
++
++#ifdef L_avr32_f64_to_u32
++ /* This goes into L_fixdfsi */
++#endif
++
++
++#ifdef L_avr32_f64_to_s32
++ .global __avr32_f64_to_u32
++ .type __avr32_f64_to_u32,@function
++__avr32_f64_to_u32:
++ cp.w r11, 0
++ retmi 0 /* Negative returns 0 */
++
++ /* Fallthrough to df to signed si conversion */
++ .global __avr32_f64_to_s32
++ .type __avr32_f64_to_s32,@function
++__avr32_f64_to_s32:
++ lsl r12,r11,1
++ lsr r12,21 /* extract exponent*/
++ sub r12,1023 /* convert to unbiased exponent.*/
++ retlo 0 /* too small exponent implies zero. */
++
++1:
++ rsub r12,r12,31 /* shift count = 31 - exponent */
++ mov r9,r11 /* save sign for later...*/
++ lsl r11,11 /* remove exponent and sign*/
++ sbr r11,31 /* add implicit bit*/
++ or r11,r11,r10>>21 /* get rest of bits from lsw of double */
++ lsr r11,r11,r12 /* shift down mantissa to final place */
++ lsl r9,1 /* sign -> carry */
++ retcc r11 /* if positive, we are done */
++ neg r11 /* if negative float, negate result */
++ ret r11
++
++#endif /* L_fixdfsi*/
++
++#ifdef L_avr32_f64_to_u64
++ /* Actual function is in L_fixdfdi */
++#endif
++
++#ifdef L_avr32_f64_to_s64
++ .global __avr32_f64_to_u64
++ .type __avr32_f64_to_u64,@function
++__avr32_f64_to_u64:
++ cp.w r11,0
++ /* Negative numbers return zero */
++ movmi r10, 0
++ movmi r11, 0
++ retmi r11
++
++
++
++ /* Fallthrough */
++ .global __avr32_f64_to_s64
++ .type __avr32_f64_to_s64,@function
++__avr32_f64_to_s64:
++ lsl r9,r11,1
++ lsr r9,21 /* get exponent*/
++ sub r9,1023 /* convert to correct range*/
++ /* Return zero if exponent to small */
++ movlo r10, 0
++ movlo r11, 0
++ retlo r11
++
++ mov r8,r11 /* save sign for later...*/
++1:
++ lsl r11,11 /* remove exponent */
++ sbr r11,31 /* add implicit bit*/
++ or r11,r11,r10>>21 /* get rest of bits from lsw of double*/
++ lsl r10,11 /* align lsw correctly as well */
++ rsub r9,r9,63 /* shift count = 63 - exponent */
++ breq 1f
++
++ cp.w r9,32 /* is shift count more than one reg? */
++ brhs 0f
++
++ mov r12,r11 /* save msw */
++ lsr r10,r10,r9 /* small shift count, shift down lsw */
++ lsr r11,r11,r9 /* small shift count, shift down msw */
++ rsub r9,r9,32 /* get 32-size of shifted out tail */
++ lsl r12,r12,r9 /* align part to move from msw to lsw */
++ or r10,r12 /* combine to get new lsw */
++ rjmp 1f
++
++0:
++ lsr r10,r11,r9 /* large shift count,only lsw get bits
++ note that shift count is modulo 32*/
++ mov r11,0 /* msw will be 0 */
++
++1:
++ lsl r8,1 /* sign -> carry */
++ retcc r11 /* if positive, we are done */
++
++ neg r11 /* if negative float, negate result */
++ neg r10
++ scr r11
++ ret r11
++
++#endif
++
++#ifdef L_avr32_u32_to_f64
++ /* Code located in L_floatsidf */
++#endif
++
++#ifdef L_avr32_s32_to_f64
++ .global __avr32_u32_to_f64
++ .type __avr32_u32_to_f64,@function
++__avr32_u32_to_f64:
++ sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */
++ mov r12, 0 /* always positive */
++ rjmp 0f /* Jump to common code for floatsidf */
++
++ .global __avr32_s32_to_f64
++ .type __avr32_s32_to_f64,@function
++__avr32_s32_to_f64:
++ mov r11, r12 /* Keep original value in r12 for sign */
++ abs r11 /* Absolute value if r12 */
++0:
++ mov r10,0 /* let remaining bits be zero */
++ reteq r11 /* zero long will return zero float */
++
++ pushm lr
++ mov r9,31+1023 /* set exponent */
++
++ normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
++
++ /* Check if a subnormal result was created */
++ cp.w r9, 0
++ brgt 0f
++
++ adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
++ popm pc
++0:
++
++ /* Round result */
++ round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
++ cp.w r9,0x7ff
++ brlt 0f
++ /*Return infinity */
++ mov r10, 0
++ mov_imm r11, 0xffe00000
++ rjmp __floatsidf_return_op1
++
++0:
++
++ /* Pack */
++ pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
++__floatsidf_return_op1:
++ lsl r12,1 /* shift in sign bit */
++ ror r11
++
++ popm pc
++#endif
++
++
++#ifdef L_avr32_f32_cmp_eq
++ .global __avr32_f32_cmp_eq
++ .type __avr32_f32_cmp_eq,@function
++__avr32_f32_cmp_eq:
++ cp.w r12, r11
++ breq 0f
++ /* If not equal check for +/-0 */
++ /* Or together the two values and shift out the sign bit.
++ If the result is zero, then the two values are both zero. */
++ or r12, r11
++ lsl r12, 1
++ reteq 1
++ ret 0
++0:
++ /* Numbers were equal. Check for NaN or Inf */
++ mov_imm r11, 0xff000000
++ lsl r12, 1
++ cp.w r12, r11
++ retls 1 /* 0 if NaN, 1 otherwise */
++ ret 0
++#endif
++
++#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
++#ifdef L_avr32_f32_cmp_ge
++ .global __avr32_f32_cmp_ge
++ .type __avr32_f32_cmp_ge,@function
++__avr32_f32_cmp_ge:
++#endif
++#ifdef L_avr32_f32_cmp_lt
++ .global __avr32_f32_cmp_lt
++ .type __avr32_f32_cmp_lt,@function
++__avr32_f32_cmp_lt:
++#endif
++ lsl r10, r12, 1 /* Remove sign bits */
++ lsl r9, r11, 1
++ subfeq r10, 0
++#ifdef L_avr32_f32_cmp_ge
++ reteq 1 /* Both number are zero. Return true. */
++#endif
++#ifdef L_avr32_f32_cmp_lt
++ reteq 0 /* Both number are zero. Return false. */
++#endif
++ mov_imm r8, 0xff000000
++ cp.w r10, r8
++ rethi 0 /* Op0 is NaN */
++ cp.w r9, r8
++ rethi 0 /* Op1 is Nan */
++
++ eor r8, r11, r12
++ bld r12, 31
++#ifdef L_avr32_f32_cmp_ge
++ srcc r8 /* Set result to true if op0 is positive*/
++#endif
++#ifdef L_avr32_f32_cmp_lt
++ srcs r8 /* Set result to true if op0 is negative*/
++#endif
++ retmi r8 /* Return if signs are different */
++ brcs 0f /* Both signs negative? */
++
++ /* Both signs positive */
++ cp.w r12, r11
++#ifdef L_avr32_f32_cmp_ge
++ reths 1
++ retlo 0
++#endif
++#ifdef L_avr32_f32_cmp_lt
++ reths 0
++ retlo 1
++#endif
++0:
++ /* Both signs negative */
++ cp.w r11, r12
++#ifdef L_avr32_f32_cmp_ge
++ reths 1
++ retlo 0
++#endif
++#ifdef L_avr32_f32_cmp_lt
++ reths 0
++ retlo 1
++#endif
++#endif
++
++
++#ifdef L_avr32_f64_cmp_eq
++ .global __avr32_f64_cmp_eq
++ .type __avr32_f64_cmp_eq,@function
++__avr32_f64_cmp_eq:
++ cp.w r10,r8
++ cpc r11,r9
++ breq 0f
++
++ /* Args were not equal*/
++ /* Both args could be zero with different sign bits */
++ lsl r11,1 /* get rid of sign bits */
++ lsl r9,1
++ or r11,r10 /* Check if all bits are zero */
++ or r11,r9
++ or r11,r8
++ reteq 1 /* If all zeros the arguments are equal
++ so return 1 else return 0 */
++ ret 0
++0:
++ /* check for NaN */
++ lsl r11,1
++ mov_imm r12, 0xffe00000
++ cp.w r10,0
++ cpc r11,r12 /* check if nan or inf */
++ retls 1 /* If Arg is NaN return 0 else 1*/
++ ret 0 /* Return */
++
++#endif
++
++
++#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
++
++#ifdef L_avr32_f64_cmp_ge
++ .global __avr32_f64_cmp_ge
++ .type __avr32_f64_cmp_ge,@function
++__avr32_f64_cmp_ge:
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ .global __avr32_f64_cmp_lt
++ .type __avr32_f64_cmp_lt,@function
++__avr32_f64_cmp_lt:
++#endif
++
++ /* compare magnitude of op1 and op2 */
++ st.w --sp, lr
++ st.w --sp, r7
++ lsl r11,1 /* Remove sign bit of op1 */
++ srcs r12 /* Sign op1 to lsb of r12*/
++ lsl r9,1 /* Remove sign bit of op2 */
++ srcs r7
++ rol r12 /* Sign op2 to lsb of lr, sign bit op1 bit 1 of r12*/
++
++
++ /* Check for Nan */
++ mov_imm lr, 0xffe00000
++ cp.w r10,0
++ cpc r11,lr
++ brhi 0f /* We have NaN */
++ cp.w r8,0
++ cpc r9,lr
++ brhi 0f /* We have NaN */
++
++ cp.w r11, 0
++ subfeq r10, 0
++ breq 3f /* op1 zero */
++ ld.w r7, sp++
++ ld.w lr, sp++
++
++ cp.w r12,3 /* both operands negative ?*/
++ breq 1f
++
++ cp.w r12,1 /* both operands positive? */
++ brlo 2f
++
++ /* Different signs. If sign of op1 is negative the difference
++ between op1 and op2 will always be negative, and if op1 is
++ positive the difference will always be positive */
++#ifdef L_avr32_f64_cmp_ge
++ reteq 1
++ retne 0
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ reteq 0
++ retne 1
++#endif
++
++2:
++ /* Both operands positive. Just compute the difference */
++ cp.w r10,r8
++ cpc r11,r9
++#ifdef L_avr32_f64_cmp_ge
++ reths 1
++ retlo 0
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ reths 0
++ retlo 1
++#endif
++
++1:
++ /* Both operands negative. Compute the difference with operands switched */
++ cp r8,r10
++ cpc r9,r11
++#ifdef L_avr32_f64_cmp_ge
++ reths 1
++ retlo 0
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ reths 0
++ retlo 1
++#endif
++
++0:
++ ld.w r7, sp++
++ popm pc, r12=0
++#endif
++
++3:
++ cp.w r7, 1 /* Check sign bit from r9 */
++#ifdef L_avr32_f64_cmp_ge
++ sreq r12 /* If op2 is negative then op1 >= op2. */
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ srne r12 /* If op2 is positve then op1 <= op2. */
++#endif
++ cp.w r9, 0
++ subfeq r8, 0
++ ld.w r7, sp++
++ ld.w lr, sp++
++#ifdef L_avr32_f64_cmp_ge
++ reteq 1 /* Both operands are zero. Return true. */
++#endif
++#ifdef L_avr32_f64_cmp_lt
++ reteq 0 /* Both operands are zero. Return false. */
++#endif
++ ret r12
++
++
++#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
++ .align 2
++
++#if defined(L_avr32_f64_div_fast)
++ .global __avr32_f64_div_fast
++ .type __avr32_f64_div_fast,@function
++__avr32_f64_div_fast:
++#else
++ .global __avr32_f64_div
++ .type __avr32_f64_div,@function
++__avr32_f64_div:
++#endif
++ stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr
++ /* op1 in {r11,r10}*/
++ /* op2 in {r9,r8}*/
++ eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
++
++
++ /* Unpack op1 to 2.62 format*/
++ /* exp: r7 */
++ /* sf: r11, r10 */
++ lsr r7, r11, 20 /* Extract exponent */
++
++ lsl r11, 9 /* Extract mantissa, leave room for implicit bit */
++ or r11, r11, r10>>23
++ lsl r10, 9
++ sbr r11, 29 /* Insert implicit bit */
++ andh r11, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
++
++ cbr r7, 11 /* Clear sign bit */
++ /* Check if normalization is needed */
++ breq 11f /*If number is subnormal, normalize it */
++22:
++ cp r7, 0x7ff
++ brge 2f /* Check op1 for NaN or Inf */
++
++ /* Unpack op2 to 2.62 format*/
++ /* exp: r6 */
++ /* sf: r9, r8 */
++ lsr r6, r9, 20 /* Extract exponent */
++
++ lsl r9, 9 /* Extract mantissa, leave room for implicit bit */
++ or r9, r9, r8>>23
++ lsl r8, 9
++ sbr r9, 29 /* Insert implicit bit */
++ andh r9, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
++
++ cbr r6, 11 /* Clear sign bit */
++ /* Check if normalization is needed */
++ breq 13f /*If number is subnormal, normalize it */
++23:
++ cp r6, 0x7ff
++ brge 3f /* Check op2 for NaN or Inf */
++
++ /* Calculate new exponent */
++ sub r7, r6
++ sub r7,-1023
++
++ /* Divide */
++ /* Approximating 1/d with the following recurrence: */
++ /* R[j+1] = R[j]*(2-R[j]*d) */
++ /* Using 2.62 format */
++ /* TWO: r12 */
++ /* d = op2 = divisor (2.62 format): r9,r8 */
++ /* Multiply result : r5, r4 */
++ /* Initial guess : r3, r2 */
++ /* New approximations : r3, r2 */
++ /* op1 = Dividend (2.62 format) : r11, r10 */
++
++ mov_imm r12, 0x80000000
++
++ /* Load initial guess, using look-up table */
++ /* Initial guess is of format 01.XY, where XY is constructed as follows: */
++ /* Let d be of following format: 00.1xy....., then XY=~xy */
++ /* For d=00.100 = 0,5 -> initial guess=01.11 = 1,75 */
++ /* For d=00.101 = 0,625 -> initial guess=01.11 = 1,5 */
++ /* For d=00.110 = 0,75 -> initial guess=01.11 = 1,25 */
++ /* For d=00.111 = 0,875 -> initial guess=01.11 = 1,0 */
++ /* r2 is also part of the reg pair forming initial guess, but it*/
++ /* is kept uninitialized to save one cycle since it has so low significance*/
++
++ lsr r3, r12, 1
++ bfextu r4, r9, 27, 2
++ com r4
++ bfins r3, r4, 28, 2
++
++ /* First approximation */
++ /* Approximating to 32 bits */
++ /* r5 = R[j]*d */
++ mulu.d r4, r3, r9
++ /* r5 = 2-R[j]*d */
++ sub r5, r12, r5<<2
++ /* r3 = R[j]*(2-R[j]*d) */
++ mulu.d r4, r3, r5
++ lsl r3, r5, 2
++
++ /* Second approximation */
++ /* Approximating to 32 bits */
++ /* r5 = R[j]*d */
++ mulu.d r4, r3, r9
++ /* r5 = 2-R[j]*d */
++ sub r5, r12, r5<<2
++ /* r3 = R[j]*(2-R[j]*d) */
++ mulu.d r4, r3, r5
++ lsl r3, r5, 2
++
++ /* Third approximation */
++ /* Approximating to 32 bits */
++ /* r5 = R[j]*d */
++ mulu.d r4, r3, r9
++ /* r5 = 2-R[j]*d */
++ sub r5, r12, r5<<2
++ /* r3 = R[j]*(2-R[j]*d) */
++ mulu.d r4, r3, r5
++ lsl r3, r5, 2
++
++ /* Fourth approximation */
++ /* Approximating to 64 bits */
++ /* r5,r4 = R[j]*d */
++ mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
++ lsl r5, 2
++ or r5, r5, r4>>30
++ lsl r4, 2
++ /* r5,r4 = 2-R[j]*d */
++ neg r4
++ sbc r5, r12, r5
++ /* r3,r2 = R[j]*(2-R[j]*d) */
++ mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
++ lsl r3, r5, 2
++ or r3, r3, r4>>30
++ lsl r2, r4, 2
++
++
++ /* Fifth approximation */
++ /* Approximating to 64 bits */
++ /* r5,r4 = R[j]*d */
++ mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
++ lsl r5, 2
++ or r5, r5, r4>>30
++ lsl r4, 2
++ /* r5,r4 = 2-R[j]*d */
++ neg r4
++ sbc r5, r12, r5
++ /* r3,r2 = R[j]*(2-R[j]*d) */
++ mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
++ lsl r3, r5, 2
++ or r3, r3, r4>>30
++ lsl r2, r4, 2
++
++
++ /* Multiply with dividend to get quotient */
++ mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/
++
++
++ /* To increase speed, this result is not corrected before final rounding.*/
++ /* This may give a difference to IEEE compliant code of 1 ULP.*/
++
++
++ /* Adjust exponent and mantissa */
++ /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/
++ /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
++ /* In the first case, shift one pos to left.*/
++ bld r3, 31-3
++ breq 0f
++ lsl r2, 1
++ rol r3
++ sub r7, 1
++#if defined(L_avr32_f64_div)
++ /* We must scale down the dividend to 5.59 format. */
++ lsr r10, 3
++ or r10, r10, r11 << 29
++ lsr r11, 3
++ rjmp 1f
++#endif
++0:
++#if defined(L_avr32_f64_div)
++ /* We must scale down the dividend to 6.58 format. */
++ lsr r10, 4
++ or r10, r10, r11 << 28
++ lsr r11, 4
++1:
++#endif
++ cp r7, 0
++ brle __avr32_f64_div_res_subnormal /* Result was subnormal. */
++
++
++#if defined(L_avr32_f64_div)
++ /* In order to round correctly we calculate the remainder:
++ Remainder = dividend[11:r10] - divisor[r9:r8]*quotient[r3:r2]
++ for the case when the quotient is halfway between the round-up
++ value and the round down value. If the remainder then is negative
++ it means that the quotient was to big and that it should not be
++ rounded up, if the remainder is positive the quotient was to small
++ and we need to round up. If the remainder is zero it means that the
++ quotient is exact but since we need to remove the guard bit we should
++ round to even. */
++
++ /* Truncate and add guard bit. */
++ andl r2, 0xff00
++ orl r2, 0x0080
++
++
++ /* Now do the multiplication. The quotient has the format 4.60
++ while the divisor has the format 2.62 which gives a result
++ of 6.58 */
++ mulu.d r0, r3, r8
++ macu.d r0, r2, r9
++ mulu.d r4, r2, r8
++ mulu.d r8, r3, r9
++ add r5, r0
++ adc r8, r8, r1
++ acr r9
++
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r0 */
++ cp r4, 0
++ cpc r5
++__avr32_f64_div_round_subnormal:
++ cpc r8, r10
++ cpc r9, r11
++ srlo r6 /* Remainder positive: we need to round up.*/
++ moveq r6, r12 /* Remainder zero: round up if mantissa odd. */
++#else
++ bfextu r6, r2, 7, 1 /* Get guard bit */
++#endif
++ /* Final packing, scale down mantissa. */
++ lsr r10, r2, 8
++ or r10, r10, r3<<24
++ lsr r11, r3, 8
++ /* Insert exponent and sign bit*/
++ bfins r11, r7, 20, 11
++ bld lr, 31
++ bst r11, 31
++
++ /* Final rounding */
++ add r10, r6
++ acr r11
++
++ /* Return result in [r11,r10] */
++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
++
++
++2:
++ /* Op1 is NaN or inf */
++ andh r11, 0x000f /* Extract mantissa */
++ or r11, r10
++ brne 16f /* Return NaN if op1 is NaN */
++ /* Op1 is inf check op2 */
++ lsr r6, r9, 20 /* Extract exponent */
++ cbr r6, 11 /* Clear sign bit */
++ cp r6, 0x7ff
++ brne 17f /* Inf/number gives inf, return inf */
++ rjmp 16f /* The rest gives NaN*/
++
++3:
++ /* Op1 is a valid number. Op 2 is NaN or inf */
++ andh r9, 0x000f /* Extract mantissa */
++ or r9, r8
++ brne 16f /* Return NaN if op2 is NaN */
++ rjmp 15f /* Op2 was inf, return zero*/
++
++11: /* Op1 was denormal. Fix it. */
++ lsl r11, 3
++ or r11, r11, r10 >> 29
++ lsl r10, 3
++ /* Check if op1 is zero. */
++ or r4, r10, r11
++ breq __avr32_f64_div_op1_zero
++ normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
++ lsr r10, 2
++ or r10, r10, r11 << 30
++ lsr r11, 2
++ rjmp 22b
++
++
++13: /* Op2 was denormal. Fix it */
++ lsl r9, 3
++ or r9, r9, r8 >> 29
++ lsl r8, 3
++ /* Check if op2 is zero. */
++ or r4, r9, r8
++ breq 17f /* Divisor is zero -> return Inf */
++ normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
++ lsr r8, 2
++ or r8, r8, r9 << 30
++ lsr r9, 2
++ rjmp 23b
++
++
++__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */
++#if defined(L_avr32_f64_div)
++ /* Check how much we must scale down the mantissa. */
++ neg r7
++ sub r7, -1 /* We do no longer have an implicit bit. */
++ satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
++ cp.w r7, 32
++ brge 0f
++ /* Shift amount <32 */
++ /* Scale down quotient */
++ rsub r6, r7, 32
++ lsr r2, r2, r7
++ lsl r12, r3, r6
++ or r2, r12
++ lsr r3, r3, r7
++ /* Scale down the dividend to match the scaling of the quotient. */
++ lsl r1, r10, r6
++ lsr r10, r10, r7
++ lsl r12, r11, r6
++ or r10, r12
++ lsr r11, r11, r7
++ mov r0, 0
++ rjmp 1f
++0:
++ /* Shift amount >=32 */
++ rsub r6, r7, 32
++ moveq r0, 0
++ moveq r12, 0
++ breq 0f
++ lsl r0, r10, r6
++ lsl r12, r11, r6
++0:
++ lsr r2, r3, r7
++ mov r3, 0
++ /* Scale down the dividend to match the scaling of the quotient. */
++ lsr r1, r10, r7
++ or r1, r12
++ lsr r10, r11, r7
++ mov r11, 0
++1:
++ /* Start performing the same rounding as done for normal numbers
++ but this time we have scaled the quotient and dividend and hence
++ need a little different comparison. */
++ /* Truncate and add guard bit. */
++ andl r2, 0xff00
++ orl r2, 0x0080
++
++ /* Now do the multiplication. */
++ mulu.d r6, r3, r8
++ macu.d r6, r2, r9
++ mulu.d r4, r2, r8
++ mulu.d r8, r3, r9
++ add r5, r6
++ adc r8, r8, r7
++ acr r9
++
++ /* Set exponent to 0 */
++ mov r7, 0
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r0 */
++ cp r4, r0
++ cpc r5, r1
++ /* Now the rest of the rounding is the same as for normals. */
++ rjmp __avr32_f64_div_round_subnormal
++
++#endif
++15:
++ /* Flush to zero for the fast version. */
++ mov r11, lr /*Get correct sign*/
++ andh r11, 0x8000, COH
++ mov r10, 0
++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
++
++16: /* Return NaN. */
++ mov r11, -1
++ mov r10, 0
++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
++
++17:
++ /* Check if op1 is zero. */
++ or r4, r10, r11
++ breq __avr32_f64_div_op1_zero
++ /* Return INF. */
++ mov r11, lr /*Get correct sign*/
++ andh r11, 0x8000, COH
++ orh r11, 0x7ff0
++ mov r10, 0
++ ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
++
++__avr32_f64_div_op1_zero:
++ or r5, r8, r9 << 1
++ breq 16b /* 0.0/0.0 -> NaN */
++ bfextu r4, r9, 20, 11
++ cp r4, 0x7ff
++ brne 15b /* Return zero */
++ /* Check if divisor is Inf or NaN */
++ or r5, r8, r9 << 12
++ breq 15b /* Divisor is inf -> return zero */
++ rjmp 16b /* Return NaN */
++
++
++
++
++#endif
++
++#if defined(L_avr32_f32_addsub) || defined(L_avr32_f32_addsub_fast)
++
++ .align 2
++__avr32_f32_sub_from_add:
++ /* Switch sign on op2 */
++ eorh r11, 0x8000
++
++#if defined(L_avr32_f32_addsub_fast)
++ .global __avr32_f32_sub_fast
++ .type __avr32_f32_sub_fast,@function
++__avr32_f32_sub_fast:
++#else
++ .global __avr32_f32_sub
++ .type __avr32_f32_sub,@function
++__avr32_f32_sub:
++#endif
++
++ /* Check signs */
++ eor r8, r11, r12
++ /* Different signs, use subtraction. */
++ brmi __avr32_f32_add_from_sub
++
++ /* Get sign of op1 */
++ mov r8, r12
++ andh r12, 0x8000, COH
++
++ /* Remove sign from operands */
++ cbr r11, 31
++#if defined(L_avr32_f32_addsub_fast)
++ reteq r8 /* If op2 is zero return op1 */
++#endif
++ cbr r8, 31
++
++ /* Put the number with the largest exponent in r10
++ and the number with the smallest exponent in r9 */
++ max r10, r8, r11
++ min r9, r8, r11
++ cp r10, r8 /*If largest operand (in R10) is not equal to op1*/
++ subne r12, 1 /* Subtract 1 from sign, which will invert MSB of r12*/
++ andh r12, 0x8000, COH /*Mask all but MSB*/
++
++ /* Unpack exponent and mantissa of op1 */
++ lsl r8, r10, 8
++ sbr r8, 31 /* Set implicit bit. */
++ lsr r10, 23
++
++ /* op1 is NaN or Inf. */
++ cp.w r10, 0xff
++ breq __avr32_f32_sub_op1_nan_or_inf
++
++ /* Unpack exponent and mantissa of op2 */
++ lsl r11, r9, 8
++ sbr r11, 31 /* Set implicit bit. */
++ lsr r9, 23
++
++#if defined(L_avr32_f32_addsub)
++ /* Keep sticky bit for correct IEEE rounding */
++ st.w --sp, r12
++
++ /* op2 is either zero or subnormal. */
++ breq __avr32_f32_sub_op2_subnormal
++0:
++ /* Get shift amount to scale mantissa of op2. */
++ sub r12, r10, r9
++
++ breq __avr32_f32_sub_shift_done
++
++ /* Saturate the shift amount to 31. If the amount
++ is any larger op2 is insignificant. */
++ satu r12 >> 0, 5
++
++ /* Put the remaining bits into r9.*/
++ rsub r9, r12, 32
++ lsl r9, r11, r9
++
++ /* If the remaining bits are non-zero then we must subtract one
++ more from opL. */
++ subne r8, 1
++ srne r9 /* LSB of r9 represents sticky bits. */
++
++ /* Shift mantissa of op2 to same decimal point as the mantissa
++ of op1. */
++ lsr r11, r11, r12
++
++
++__avr32_f32_sub_shift_done:
++ /* Now subtract the mantissas. */
++ sub r8, r11
++
++ ld.w r12, sp++
++
++ /* Normalize resulting mantissa. */
++ clz r11, r8
++
++ retcs 0
++ lsl r8, r8, r11
++ sub r10, r11
++ brle __avr32_f32_sub_subnormal_result
++
++ /* Insert the bits we will remove from the mantissa into r9[31:24] */
++ or r9, r9, r8 << 24
++#else
++ /* Ignore sticky bit to simplify and speed up rounding */
++ /* op2 is either zero or subnormal. */
++ breq __avr32_f32_sub_op2_subnormal
++0:
++ /* Get shift amount to scale mantissa of op2. */
++ rsub r9, r10
++
++ /* Saturate the shift amount to 31. If the amount
++ is any larger op2 is insignificant. */
++ satu r9 >> 0, 5
++
++ /* Shift mantissa of op2 to same decimal point as the mantissa
++ of op1. */
++ lsr r11, r11, r9
++
++ /* Now subtract the mantissas. */
++ sub r8, r11
++
++ /* Normalize resulting mantissa. */
++ clz r9, r8
++ retcs 0
++ lsl r8, r8, r9
++ sub r10, r9
++ brle __avr32_f32_sub_subnormal_result
++#endif
++
++ /* Pack result. */
++ or r12, r12, r8 >> 8
++ bfins r12, r10, 23, 8
++
++ /* Round */
++__avr32_f32_sub_round:
++#if defined(L_avr32_f32_addsub)
++ mov_imm r10, 0x80000000
++ bld r12, 0
++ subne r10, -1
++ cp.w r9, r10
++ subhs r12, -1
++#else
++ bld r8, 7
++ acr r12
++#endif
++
++ ret r12
++
++
++__avr32_f32_sub_op2_subnormal:
++ /* Fix implicit bit and adjust exponent of subnormals. */
++ cbr r11, 31
++ /* Set exponent to 1 if we do not have a zero. */
++ movne r9,1
++
++ /* Check if op1 is also subnormal. */
++ cp.w r10, 0
++ brne 0b
++
++ cbr r8, 31
++ /* If op1 is not zero set exponent to 1. */
++ movne r10,1
++
++ rjmp 0b
++
++__avr32_f32_sub_op1_nan_or_inf:
++ /* Check if op1 is NaN, if so return NaN */
++ lsl r11, r8, 1
++ retne -1
++
++ /* op1 is Inf. */
++ bfins r12, r10, 23, 8 /* Generate Inf in r12 */
++
++ /* Check if op2 is Inf. or NaN */
++ lsr r11, r9, 23
++ cp.w r11, 0xff
++ retne r12 /* op2 not Inf or NaN, return op1 */
++
++ ret -1 /* op2 Inf or NaN, return NaN */
++
++__avr32_f32_sub_subnormal_result:
++ /* Check if the number is so small that
++ it will be represented with zero. */
++ rsub r10, r10, 9
++ rsub r11, r10, 32
++ retcs 0
++
++ /* Shift the mantissa into the correct position.*/
++ lsr r10, r8, r10
++ /* Add sign bit. */
++ or r12, r10
++
++ /* Put the shifted out bits in the most significant part
++ of r8. */
++ lsl r8, r8, r11
++
++#if defined(L_avr32_f32_addsub)
++ /* Add all the remainder bits used for rounding into r9 */
++ or r9, r8
++#else
++ lsr r8, 24
++#endif
++ rjmp __avr32_f32_sub_round
++
++
++ .align 2
++
++__avr32_f32_add_from_sub:
++ /* Switch sign on op2 */
++ eorh r11, 0x8000
++
++#if defined(L_avr32_f32_addsub_fast)
++ .global __avr32_f32_add_fast
++ .type __avr32_f32_add_fast,@function
++__avr32_f32_add_fast:
++#else
++ .global __avr32_f32_add
++ .type __avr32_f32_add,@function
++__avr32_f32_add:
++#endif
++
++ /* Check signs */
++ eor r8, r11, r12
++ /* Different signs, use subtraction. */
++ brmi __avr32_f32_sub_from_add
++
++ /* Get sign of op1 */
++ mov r8, r12
++ andh r12, 0x8000, COH
++
++ /* Remove sign from operands */
++ cbr r11, 31
++#if defined(L_avr32_f32_addsub_fast)
++ reteq r8 /* If op2 is zero return op1 */
++#endif
++ cbr r8, 31
++
++ /* Put the number with the largest exponent in r10
++ and the number with the smallest exponent in r9 */
++ max r10, r8, r11
++ min r9, r8, r11
++
++ /* Unpack exponent and mantissa of op1 */
++ lsl r8, r10, 8
++ sbr r8, 31 /* Set implicit bit. */
++ lsr r10, 23
++
++ /* op1 is NaN or Inf. */
++ cp.w r10, 0xff
++ breq __avr32_f32_add_op1_nan_or_inf
++
++ /* Unpack exponent and mantissa of op2 */
++ lsl r11, r9, 8
++ sbr r11, 31 /* Set implicit bit. */
++ lsr r9, 23
++
++#if defined(L_avr32_f32_addsub)
++ /* op2 is either zero or subnormal. */
++ breq __avr32_f32_add_op2_subnormal
++0:
++ /* Keep sticky bit for correct IEEE rounding */
++ st.w --sp, r12
++
++ /* Get shift amount to scale mantissa of op2. */
++ rsub r9, r10
++
++ /* Saturate the shift amount to 31. If the amount
++ is any larger op2 is insignificant. */
++ satu r9 >> 0, 5
++
++ /* Shift mantissa of op2 to same decimal point as the mantissa
++ of op1. */
++ lsr r12, r11, r9
++
++ /* Put the remainding bits into r11[23:..].*/
++ rsub r9, r9, (32-8)
++ lsl r11, r11, r9
++ /* Insert the bits we will remove from the mantissa into r11[31:24] */
++ bfins r11, r12, 24, 8
++
++ /* Now add the mantissas. */
++ add r8, r12
++
++ ld.w r12, sp++
++#else
++ /* Ignore sticky bit to simplify and speed up rounding */
++ /* op2 is either zero or subnormal. */
++ breq __avr32_f32_add_op2_subnormal
++0:
++ /* Get shift amount to scale mantissa of op2. */
++ rsub r9, r10
++
++ /* Saturate the shift amount to 31. If the amount
++ is any larger op2 is insignificant. */
++ satu r9 >> 0, 5
++
++ /* Shift mantissa of op2 to same decimal point as the mantissa
++ of op1. */
++ lsr r11, r11, r9
++
++ /* Now add the mantissas. */
++ add r8, r11
++
++#endif
++ /* Check if we overflowed. */
++ brcs __avr32_f32_add_res_of
++1:
++ /* Pack result. */
++ or r12, r12, r8 >> 8
++ bfins r12, r10, 23, 8
++
++ /* Round */
++#if defined(L_avr32_f32_addsub)
++ mov_imm r10, 0x80000000
++ bld r12, 0
++ subne r10, -1
++ cp.w r11, r10
++ subhs r12, -1
++#else
++ bld r8, 7
++ acr r12
++#endif
++
++ ret r12
++
++__avr32_f32_add_op2_subnormal:
++ /* Fix implicit bit and adjust exponent of subnormals. */
++ cbr r11, 31
++ /* Set exponent to 1 if we do not have a zero. */
++ movne r9,1
++
++ /* Check if op1 is also subnormal. */
++ cp.w r10, 0
++ brne 0b
++ /* Both operands subnormal, just add the mantissas and
++ pack. If the addition of the subnormal numbers results
++ in a normal number then the exponent will automatically
++ be set to 1 by the addition. */
++ cbr r8, 31
++ add r11, r8
++ or r12, r12, r11 >> 8
++ ret r12
++
++__avr32_f32_add_op1_nan_or_inf:
++ /* Check if op1 is NaN, if so return NaN */
++ lsl r11, r8, 1
++ retne -1
++
++ /* op1 is Inf. */
++ bfins r12, r10, 23, 8 /* Generate Inf in r12 */
++
++ /* Check if op2 is Inf. or NaN */
++ lsr r11, r9, 23
++ cp.w r11, 0xff
++ retne r12 /* op2 not Inf or NaN, return op1 */
++
++ lsl r9, 9
++ reteq r12 /* op2 Inf return op1 */
++ ret -1 /* op2 is NaN, return NaN */
++
++__avr32_f32_add_res_of:
++ /* We overflowed. Increase exponent and shift mantissa.*/
++ lsr r8, 1
++ sub r10, -1
++
++ /* Clear mantissa to set result to Inf if the exponent is 255. */
++ cp.w r10, 255
++ moveq r8, 0
++ moveq r11, 0
++ rjmp 1b
++
++
++#endif
++
++
++#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast)
++ .align 2
++
++#if defined(L_avr32_f32_div_fast)
++ .global __avr32_f32_div_fast
++ .type __avr32_f32_div_fast,@function
++__avr32_f32_div_fast:
++#else
++ .global __avr32_f32_div
++ .type __avr32_f32_div,@function
++__avr32_f32_div:
++#endif
++
++ eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
++
++ /* Unpack */
++ lsl r12,1
++ lsl r11,1
++ breq 4f /* Check op2 for zero */
++
++ tst r12, r12
++ moveq r9, 0
++ breq 12f
++
++ /* Unpack op1*/
++ /* exp: r9 */
++ /* sf: r12 */
++ lsr r9, r12, 24
++ breq 11f /*If number is subnormal*/
++ cp r9, 0xff
++ brhs 2f /* Check op1 for NaN or Inf */
++ lsl r12, 7
++ sbr r12, 31 /*Implicit bit*/
++12:
++
++ /* Unpack op2*/
++ /* exp: r10 */
++ /* sf: r11 */
++ lsr r10, r11, 24
++ breq 13f /*If number is subnormal*/
++ cp r10, 0xff
++ brhs 3f /* Check op2 for NaN or Inf */
++ lsl r11,7
++ sbr r11, 31 /*Implicit bit*/
++
++ cp.w r9, 0
++ subfeq r12, 0
++ reteq 0 /* op1 is zero and op2 is not zero */
++ /* or NaN so return zero */
++
++14:
++
++ /* For UC3, store with predecrement is faster than stm */
++ st.w --sp, r5
++ st.d --sp, r6
++
++ /* Calculate new exponent */
++ sub r9, r10
++ sub r9,-127
++
++ /* Divide */
++ /* Approximating 1/d with the following recurrence: */
++ /* R[j+1] = R[j]*(2-R[j]*d) */
++ /* Using 2.30 format */
++ /* TWO: r10 */
++ /* d: r5 */
++ /* Multiply result : r6, r7 */
++ /* Initial guess : r11 */
++ /* New approximations : r11 */
++ /* Dividend : r12 */
++
++ /* Load TWO */
++ mov_imm r10, 0x80000000
++
++ lsr r12, 2 /* Get significand of Op1 in 2.30 format */
++ lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */
++
++ /* Load initial guess, using look-up table */
++ /* Initial guess is of format 01.XY, where XY is constructed as follows: */
++ /* Let d be of following format: 00.1xy....., then XY=~xy */
++ /* For d=00.100 = 0,5 -> initial guess=01.11 = 1,75 */
++ /* For d=00.101 = 0,625 -> initial guess=01.11 = 1,5 */
++ /* For d=00.110 = 0,75 -> initial guess=01.11 = 1,25 */
++ /* For d=00.111 = 0,875 -> initial guess=01.11 = 1,0 */
++
++ lsr r11, r10, 1
++ bfextu r6, r5, 27, 2
++ com r6
++ bfins r11, r6, 28, 2
++
++ /* First approximation */
++ /* r7 = R[j]*d */
++ mulu.d r6, r11, r5
++ /* r7 = 2-R[j]*d */
++ sub r7, r10, r7<<2
++ /* r11 = R[j]*(2-R[j]*d) */
++ mulu.d r6, r11, r7
++ lsl r11, r7, 2
++
++ /* Second approximation */
++ /* r7 = R[j]*d */
++ mulu.d r6, r11, r5
++ /* r7 = 2-R[j]*d */
++ sub r7, r10, r7<<2
++ /* r11 = R[j]*(2-R[j]*d) */
++ mulu.d r6, r11, r7
++ lsl r11, r7, 2
++
++ /* Third approximation */
++ /* r7 = R[j]*d */
++ mulu.d r6, r11, r5
++ /* r7 = 2-R[j]*d */
++ sub r7, r10, r7<<2
++ /* r11 = R[j]*(2-R[j]*d) */
++ mulu.d r6, r11, r7
++ lsl r11, r7, 2
++
++ /* Fourth approximation */
++ /* r7 = R[j]*d */
++ mulu.d r6, r11, r5
++ /* r7 = 2-R[j]*d */
++ sub r7, r10, r7<<2
++ /* r11 = R[j]*(2-R[j]*d) */
++ mulu.d r6, r11, r7
++ lsl r11, r7, 2
++
++
++ /* Multiply with dividend to get quotient, r7 = sf(op1)/sf(op2) */
++ mulu.d r6, r11, r12
++
++ /* Shift by 3 to get result in 1.31 format, as required by the exponent. */
++ /* Note that 1.31 format is already used by the exponent in r9, since */
++ /* a bias of 127 was added to the result exponent, even though the implicit */
++ /* bit was inserted. This gives the exponent an additional bias of 1, which */
++ /* supports 1.31 format. */
++ //lsl r10, r7, 3
++
++ /* Adjust exponent and mantissa in case the result is of format
++ 0000.1xxx to 0001.xxx*/
++#if defined(L_avr32_f32_div)
++ lsr r12, 4 /* Scale dividend to 6.26 format to match the
++ result of the multiplication of the divisor and
++ quotient to get the remainder. */
++#endif
++ bld r7, 31-3
++ breq 0f
++ lsl r7, 1
++ sub r9, 1
++#if defined(L_avr32_f32_div)
++ lsl r12, 1 /* Scale dividend to 5.27 format to match the
++ result of the multiplication of the divisor and
++ quotient to get the remainder. */
++#endif
++0:
++ cp r9, 0
++ brle __avr32_f32_div_res_subnormal /* Result was subnormal. */
++
++
++#if defined(L_avr32_f32_div)
++ /* In order to round correctly we calculate the remainder:
++ Remainder = dividend[r12] - divisor[r5]*quotient[r7]
++ for the case when the quotient is halfway between the round-up
++ value and the round down value. If the remainder then is negative
++ it means that the quotient was to big and that it should not be
++ rounded up, if the remainder is positive the quotient was to small
++ and we need to round up. If the remainder is zero it means that the
++ quotient is exact but since we need to remove the guard bit we should
++ round to even. */
++ andl r7, 0xffe0
++ orl r7, 0x0010
++
++ /* Now do the multiplication. The quotient has the format 4.28
++ while the divisor has the format 2.30 which gives a result
++ of 6.26 */
++ mulu.d r10, r5, r7
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
++ cp r10, 0
++__avr32_f32_div_round_subnormal:
++ cpc r11, r12
++ srlo r11 /* Remainder positive: we need to round up.*/
++ moveq r11, r5 /* Remainder zero: round up if mantissa odd. */
++#else
++ bfextu r11, r7, 4, 1 /* Get guard bit */
++#endif
++
++ /* Pack final result*/
++ lsr r12, r7, 5
++ bfins r12, r9, 23, 8
++ /* For UC3, load with postincrement is faster than ldm */
++ ld.d r6, sp++
++ ld.w r5, sp++
++ bld r8, 31
++ bst r12, 31
++ /* Rounding add. */
++ add r12, r11
++ ret r12
++
++__divsf_return_op1:
++ lsl r8, 1
++ ror r12
++ ret r12
++
++
++2:
++ /* Op1 is NaN or inf */
++ retne -1 /* Return NaN if op1 is NaN */
++ /* Op1 is inf check op2 */
++ mov_imm r9, 0xff000000
++ cp r11, r9
++ brlo __divsf_return_op1 /* inf/number gives inf */
++ ret -1 /* The rest gives NaN*/
++3:
++ /* Op2 is NaN or inf */
++ reteq 0 /* Return zero if number/inf*/
++ ret -1 /* Return NaN*/
++4:
++ /* Op1 is zero ? */
++ tst r12,r12
++ reteq -1 /* 0.0/0.0 is NaN */
++ /* Op1 is Nan? */
++ lsr r9, r12, 24
++ breq 11f /*If number is subnormal*/
++ cp r9, 0xff
++ brhs 2b /* Check op1 for NaN or Inf */
++ /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/
++ mov_imm r12, 0xff000000
++ rjmp __divsf_return_op1
++
++11: /* Op1 was denormal. Fix it. */
++ lsl r12,7
++ clz r9,r12
++ lsl r12,r12,r9
++ rsub r9,r9,1
++ rjmp 12b
++
++13: /* Op2 was denormal. Fix it. */
++ lsl r11,7
++ clz r10,r11
++ lsl r11,r11,r10
++ rsub r10,r10,1
++ rjmp 14b
++
++
++__avr32_f32_div_res_subnormal: /* Divide result was subnormal */
++#if defined(L_avr32_f32_div)
++ /* Check how much we must scale down the mantissa. */
++ neg r9
++ sub r9, -1 /* We do no longer have an implicit bit. */
++ satu r9 >> 0, 5 /* Saturate shift amount to max 32. */
++ /* Scale down quotient */
++ rsub r10, r9, 32
++ lsr r7, r7, r9
++ /* Scale down the dividend to match the scaling of the quotient. */
++ lsl r6, r12, r10 /* Make the divident 64-bit and put the lsw in r6 */
++ lsr r12, r12, r9
++
++ /* Start performing the same rounding as done for normal numbers
++ but this time we have scaled the quotient and dividend and hence
++ need a little different comparison. */
++ andl r7, 0xffe0
++ orl r7, 0x0010
++
++ /* Now do the multiplication. The quotient has the format 4.28
++ while the divisor has the format 2.30 which gives a result
++ of 6.26 */
++ mulu.d r10, r5, r7
++
++ /* Set exponent to 0 */
++ mov r9, 0
++
++ /* Check if remainder is positive, negative or equal. */
++ bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
++ cp r10, r6
++ rjmp __avr32_f32_div_round_subnormal
++
++#else
++ ld.d r6, sp++
++ ld.w r5, sp++
++ /*Flush to zero*/
++ ret 0
++#endif
++#endif
++
++#ifdef L_avr32_f32_mul
++ .global __avr32_f32_mul
++ .type __avr32_f32_mul,@function
++
++
++__avr32_f32_mul:
++ mov r8, r12
++ eor r12, r11 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
++ andh r12, 0x8000, COH
++
++ /* arrange operands so that that op1 >= op2 */
++ cbr r8, 31
++ breq __avr32_f32_mul_op1_zero
++ cbr r11, 31
++
++ /* Put the number with the largest exponent in r10
++ and the number with the smallest exponent in r9 */
++ max r10, r8, r11
++ min r9, r8, r11
++
++ /* Unpack exponent and mantissa of op1 */
++ lsl r8, r10, 8
++ sbr r8, 31 /* Set implicit bit. */
++ lsr r10, 23
++
++ /* op1 is NaN or Inf. */
++ cp.w r10, 0xff
++ breq __avr32_f32_mul_op1_nan_or_inf
++
++ /* Unpack exponent and mantissa of op2 */
++ lsl r11, r9, 8
++ sbr r11, 31 /* Set implicit bit. */
++ lsr r9, 23
++
++ /* op2 is either zero or subnormal. */
++ breq __avr32_f32_mul_op2_subnormal
++0:
++ /* Calculate new exponent */
++ add r9,r10
++
++ /* Do the multiplication */
++ mulu.d r10,r8,r11
++
++ /* We might need to scale up by two if the MSB of the result is
++ zero. */
++ lsl r8, r11, 1
++ movcc r11, r8
++ subcc r9, 1
++
++ /* Put the shifted out bits of the mantissa into r10 */
++ lsr r10, 8
++ bfins r10, r11, 24, 8
++
++ sub r9,(127-1) /* remove extra exponent bias */
++ brle __avr32_f32_mul_res_subnormal
++
++ /* Check for Inf. */
++ cp.w r9, 0xff
++ brge 1f
++
++ /* Pack result. */
++ or r12, r12, r11 >> 8
++ bfins r12, r9, 23, 8
++
++ /* Round */
++__avr32_f32_mul_round:
++ mov_imm r8, 0x80000000
++ bld r12, 0
++ subne r8, -1
++
++ cp.w r10, r8
++ subhs r12, -1
++
++ ret r12
++
++1:
++ /* Return Inf */
++ orh r12, 0x7f80
++ ret r12
++
++__avr32_f32_mul_op2_subnormal:
++ cbr r11, 31
++ clz r9, r11
++ retcs 0 /* op2 is zero. Return 0 */
++ sub r9, 8
++ lsl r11, r11, r9
++ rsub r9, r9, 1
++
++ /* Check if op2 is subnormal. */
++ tst r10, r10
++ brne 0b
++
++ /* op2 is subnormal */
++ cbr r8, 31
++ clz r10, r11
++ retcs 0 /* op1 is zero. Return 0 */
++ lsl r8, r8, r10
++ rsub r10, r10, 1
++
++ rjmp 0b
++
++
++__avr32_f32_mul_op1_nan_or_inf:
++ /* Check if op1 is NaN, if so return NaN */
++ lsl r11, r8, 1
++ retne -1
++
++ /* op1 is Inf. */
++ tst r9, r9
++ reteq -1 /* Inf * 0 -> NaN */
++
++ bfins r12, r10, 23, 8 /* Generate Inf in r12 */
++
++ /* Check if op2 is Inf. or NaN */
++ lsr r11, r9, 23
++ cp.w r11, 0xff
++ retne r12 /* op2 not Inf or NaN, return Info */
++
++ lsl r9, 9
++ reteq r12 /* op2 Inf return Inf */
++ ret -1 /* op2 is NaN, return NaN */
++
++__avr32_f32_mul_res_subnormal:
++ /* Check if the number is so small that
++ it will be represented with zero. */
++ rsub r9, r9, 9
++ rsub r8, r9, 32
++ retcs 0
++
++ /* Shift the mantissa into the correct position.*/
++ lsr r9, r11, r9
++ /* Add sign bit. */
++ or r12, r9
++ /* Put the shifted out bits in the most significant part
++ of r8. */
++ lsl r11, r11, r8
++
++ /* Add all the remainder bits used for rounding into r11 */
++ andh r10, 0x00FF
++ or r10, r11
++ rjmp __avr32_f32_mul_round
++
++__avr32_f32_mul_op1_zero:
++ bfextu r10, r11, 23, 8
++ cp.w r10, 0xff
++ retne r12
++ reteq -1
++
++#endif
++
++
++#ifdef L_avr32_s32_to_f32
++ .global __avr32_s32_to_f32
++ .type __avr32_s32_to_f32,@function
++__avr32_s32_to_f32:
++ cp r12, 0
++ reteq r12 /* If zero then return zero float */
++ mov r11, r12 /* Keep the sign */
++ abs r12 /* Compute the absolute value */
++ mov r10, 31 + 127 /* Set the correct exponent */
++
++ /* Normalize */
++ normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
++
++ /* Check for subnormal result */
++ cp.w r10, 0
++ brle __avr32_s32_to_f32_subnormal
++
++ round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
++ pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
++ lsl r11, 1
++ ror r12
++ ret r12
++
++__avr32_s32_to_f32_subnormal:
++ /* Adjust a subnormal result */
++ adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
++ ret r12
++
++#endif
++
++#ifdef L_avr32_u32_to_f32
++ .global __avr32_u32_to_f32
++ .type __avr32_u32_to_f32,@function
++__avr32_u32_to_f32:
++ cp r12, 0
++ reteq r12 /* If zero then return zero float */
++ mov r10, 31 + 127 /* Set the correct exponent */
++
++ /* Normalize */
++ normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
++
++ /* Check for subnormal result */
++ cp.w r10, 0
++ brle __avr32_u32_to_f32_subnormal
++
++ round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
++ pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
++ lsr r12,1 /* Sign bit is 0 for unsigned int */
++ ret r12
++
++__avr32_u32_to_f32_subnormal:
++ /* Adjust a subnormal result */
++ mov r8, 0
++ adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
++ ret r12
++
++
++#endif
++
++
++#ifdef L_avr32_f32_to_s32
++ .global __avr32_f32_to_s32
++ .type __avr32_f32_to_s32,@function
++__avr32_f32_to_s32:
++ bfextu r11, r12, 23, 8
++ sub r11,127 /* Fix bias */
++ retlo 0 /* Negative exponent yields zero integer */
++
++ /* Shift mantissa into correct position */
++ rsub r11,r11,31 /* Shift amount */
++ lsl r10,r12,8 /* Get mantissa */
++ sbr r10,31 /* Add implicit bit */
++ lsr r10,r10,r11 /* Perform shift */
++ lsl r12,1 /* Check sign */
++ retcc r10 /* if positive, we are done */
++ neg r10 /* if negative float, negate result */
++ ret r10
++
++#endif
++
++#ifdef L_avr32_f32_to_u32
++ .global __avr32_f32_to_u32
++ .type __avr32_f32_to_u32,@function
++__avr32_f32_to_u32:
++ cp r12,0
++ retmi 0 /* Negative numbers gives 0 */
++ bfextu r11, r12, 23, 8 /* Extract exponent */
++ sub r11,127 /* Fix bias */
++ retlo 0 /* Negative exponent yields zero integer */
++
++ /* Shift mantissa into correct position */
++ rsub r11,r11,31 /* Shift amount */
++ lsl r12,8 /* Get mantissa */
++ sbr r12,31 /* Add implicit bit */
++ lsr r12,r12,r11 /* Perform shift */
++ ret r12
++
++#endif
++
++#ifdef L_avr32_f32_to_f64
++ .global __avr32_f32_to_f64
++ .type __avr32_f32_to_f64,@function
++
++__avr32_f32_to_f64:
++ lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/
++ moveq r10, 0
++ reteq r11 /* Return zero if input is zero */
++
++ bfextu r9,r11,24,8 /* Get exponent */
++ cp.w r9,0xff /* check for NaN or inf */
++ breq 0f
++
++ lsl r11,7 /* Convert sf mantissa to df format */
++ mov r10,0
++
++ /* Check if implicit bit should be set */
++ cp.w r9, 0
++ subeq r9,-1 /* Adjust exponent if it was 0 */
++ srne r8
++ or r11, r11, r8 << 31 /* Set implicit bit if needed */
++ sub r9,(127-0x3ff) /* Convert exponent to df format exponent */
++
++ /*We know that low register of mantissa is 0, and will be unaffected by normalization.*/
++ /*We can therefore use the faster normalize_sf function instead of normalize_df.*/
++ normalize_sf r9 /*exp*/, r11 /*mantissa*/, r8 /*scratch*/
++ pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
++
++__extendsfdf_return_op1:
++ /* Rotate in sign bit */
++ lsl r12, 1
++ ror r11
++ ret r11
++
++0:
++ /* Inf or NaN*/
++ mov_imm r10, 0xffe00000
++ lsl r11,8 /* check mantissa */
++ movne r11, -1 /* Return NaN */
++ moveq r11, r10 /* Return inf */
++ mov r10, 0
++ rjmp __extendsfdf_return_op1
++#endif
++
++
++#ifdef L_avr32_f64_to_f32
++ .global __avr32_f64_to_f32
++ .type __avr32_f64_to_f32,@function
++
++__avr32_f64_to_f32:
++ /* Unpack */
++ lsl r9,r11,1 /* Unpack exponent */
++ lsr r9,21
++
++ reteq 0 /* If exponent is 0 the number is so small
++ that the conversion to single float gives
++ zero */
++
++ lsl r8,r11,10 /* Adjust mantissa */
++ or r12,r8,r10>>22
++
++ lsl r10,10 /* Check if there are any remaining bits
++ in the low part of the mantissa.*/
++ neg r10
++ rol r12 /* If there were remaining bits then set lsb
++ of mantissa to 1 */
++
++ cp r9,0x7ff
++ breq 2f /* Check for NaN or inf */
++
++ sub r9,(0x3ff-127) /* Adjust bias of exponent */
++ sbr r12,31 /* set the implicit bit.*/
++
++ cp.w r9, 0 /* Check for subnormal number */
++ brle 3f
++
++ round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/
++ pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
++__truncdfsf_return_op1:
++ /* Rotate in sign bit */
++ lsl r11, 1
++ ror r12
++ ret r12
++
++2:
++ /* NaN or inf */
++ cbr r12,31 /* clear implicit bit */
++ retne -1 /* Return NaN if mantissa not zero */
++ mov_imm r12, 0x7f800000
++ ret r12 /* Return inf */
++
++3: /* Result is subnormal. Adjust it.*/
++ adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
++ ret r12
++
++
++#endif
++
++#if defined(L_mulsi3) && defined(__AVR32_NO_MUL__)
++ .global __mulsi3
++ .type __mulsi3,@function
++
++__mulsi3:
++ mov r9, 0
++0:
++ lsr r11, 1
++ addcs r9, r9, r12
++ breq 1f
++ lsl r12, 1
++ rjmp 0b
++1:
++ ret r9
++#endif
+--- /dev/null
++++ b/gcc/config/avr32/lib2funcs.S
+@@ -0,0 +1,21 @@
++ .align 4
++ .global __nonlocal_goto
++ .type __nonlocal_goto,@function
++
++/* __nonlocal_goto: This function handles nonlocal_goto's in gcc.
++
++ parameter 0 (r12) = New Frame Pointer
++ parameter 1 (r11) = Address to goto
++ parameter 2 (r10) = New Stack Pointer
++
++ This function invalidates the return stack, since it returns from a
++ function without using a return instruction.
++*/
++__nonlocal_goto:
++ mov r7, r12
++ mov sp, r10
++ frs # Flush return stack
++ mov pc, r11
++
++
++
+--- /dev/null
++++ b/gcc/config/avr32/linux-elf.h
+@@ -0,0 +1,151 @@
++/*
++ Linux/Elf specific definitions.
++ Copyright 2003-2006 Atmel Corporation.
++
++ Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
++ and H�vard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
++
++ This file is part of GCC.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
++
++
++
++/* elfos.h should have already been included. Now just override
++ any conflicting definitions and add any extras. */
++
++/* Run-time Target Specification. */
++#undef TARGET_VERSION
++#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr);
++
++/* Do not assume anything about header files. */
++#define NO_IMPLICIT_EXTERN_C
++
++/* The GNU C++ standard library requires that these macros be defined. */
++#undef CPLUSPLUS_CPP_SPEC
++#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
++
++/* Now we define the strings used to build the spec file. */
++#undef LIB_SPEC
++#define LIB_SPEC \
++ "%{pthread:-lpthread} \
++ %{shared:-lc} \
++ %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
++
++/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
++ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
++ provides part of the support for getting C++ file-scope static
++ object constructed before entering `main'. */
++
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC \
++ "%{!shared: \
++ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
++ %{!p:%{profile:gcrt1.o%s} \
++ %{!profile:crt1.o%s}}}} \
++ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
++
++/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
++ the GNU/Linux magical crtend.o file (see crtstuff.c) which
++ provides part of the support for getting C++ file-scope static
++ object constructed before entering `main', followed by a normal
++ GNU/Linux "finalizer" file, `crtn.o'. */
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC \
++ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
++
++#undef ASM_SPEC
++#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
++
++#undef LINK_SPEC
++#define LINK_SPEC "%{version:-v} \
++ %{static:-Bstatic} \
++ %{shared:-shared} \
++ %{symbolic:-Bsymbolic} \
++ %{rdynamic:-export-dynamic} \
++ %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
++ %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
++
++#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
++
++/* This is how we tell the assembler that two symbols have the same value. */
++#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
++ do \
++ { \
++ assemble_name (FILE, NAME1); \
++ fputs (" = ", FILE); \
++ assemble_name (FILE, NAME2); \
++ fputc ('\n', FILE); \
++ } \
++ while (0)
++
++
++
++#undef CC1_SPEC
++#define CC1_SPEC "%{profile:-p}"
++
++/* Target CPU builtins. */
++#define TARGET_CPU_CPP_BUILTINS() \
++ do \
++ { \
++ builtin_define ("__avr32__"); \
++ builtin_define ("__AVR32__"); \
++ builtin_define ("__AVR32_LINUX__"); \
++ builtin_define (avr32_part->macro); \
++ builtin_define (avr32_arch->macro); \
++ if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
++ builtin_define ("__AVR32_AVR32A__"); \
++ else \
++ builtin_define ("__AVR32_AVR32B__"); \
++ if (TARGET_UNALIGNED_WORD) \
++ builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
++ if (TARGET_SIMD) \
++ builtin_define ("__AVR32_HAS_SIMD__"); \
++ if (TARGET_DSP) \
++ builtin_define ("__AVR32_HAS_DSP__"); \
++ if (TARGET_RMW) \
++ builtin_define ("__AVR32_HAS_RMW__"); \
++ if (TARGET_BRANCH_PRED) \
++ builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
++ if (TARGET_FAST_FLOAT) \
++ builtin_define ("__AVR32_FAST_FLOAT__"); \
++ } \
++ while (0)
++
++
++
++/* Call the function profiler with a given profile label. */
++#undef FUNCTION_PROFILER
++#define FUNCTION_PROFILER(STREAM, LABELNO) \
++ do \
++ { \
++ fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
++ fprintf (STREAM, "\ticall lr\n"); \
++ } \
++ while (0)
++
++#define NO_PROFILE_COUNTERS 1
++
++/* For dynamic libraries to work */
++/* #define PLT_REG_CALL_CLOBBERED 1 */
++#define AVR32_ALWAYS_PIC 1
++
++/* uclibc does not implement sinf, cosf etc. */
++#undef TARGET_C99_FUNCTIONS
++#define TARGET_C99_FUNCTIONS 0
++
++#define LINK_GCC_C_SEQUENCE_SPEC \
++ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+--- /dev/null
++++ b/gcc/config/avr32/predicates.md
+@@ -0,0 +1,422 @@
++;; AVR32 predicates file.
++;; Copyright 2003-2006 Atmel Corporation.
++;;
++;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
++;;
++;; This file is part of GCC.
++;;
++;; This program is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 2 of the License, or
++;; (at your option) any later version.
++;;
++;; This program is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with this program; if not, write to the Free Software
++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++
++
++;; True if the operand is a memory reference which contains an
++;; Address consisting of a single pointer register
++(define_predicate "avr32_indirect_register_operand"
++ (and (match_code "mem")
++ (match_test "register_operand(XEXP(op, 0), SImode)")))
++
++
++
++;; Address expression with a base pointer offset with
++;; a register displacement
++(define_predicate "avr32_indexed_memory_operand"
++ (and (match_code "mem")
++ (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
++ {
++
++ rtx op0 = XEXP(XEXP(op, 0), 0);
++ rtx op1 = XEXP(XEXP(op, 0), 1);
++
++ return ((avr32_address_register_rtx_p (op0, 0)
++ && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
++ || (avr32_address_register_rtx_p (op1, 0)
++ && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
++
++ })
++
++;; Operand suitable for the ld.sb instruction
++(define_predicate "load_sb_memory_operand"
++ (ior (match_operand 0 "avr32_indirect_register_operand")
++ (match_operand 0 "avr32_indexed_memory_operand")))
++
++
++;; Operand suitable as operand to insns sign extending QI values
++(define_predicate "extendqi_operand"
++ (ior (match_operand 0 "load_sb_memory_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "post_inc_memory_operand"
++ (and (match_code "mem")
++ (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
++ && REG_P(XEXP(XEXP(op, 0), 0))")))
++
++(define_predicate "pre_dec_memory_operand"
++ (and (match_code "mem")
++ (match_test "(GET_CODE(XEXP(op, 0)) == PRE_DEC)
++ && REG_P(XEXP(XEXP(op, 0), 0))")))
++
++;; Operand suitable for add instructions
++(define_predicate "avr32_add_operand"
++ (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
++
++;; Operand is a power of two immediate
++(define_predicate "power_of_two_operand"
++ (match_code "const_int")
++{
++ HOST_WIDE_INT value = INTVAL (op);
++
++ return value != 0 && (value & (value - 1)) == 0;
++})
++
++;; Operand is a multiple of 8 immediate
++(define_predicate "multiple_of_8_operand"
++ (match_code "const_int")
++{
++ HOST_WIDE_INT value = INTVAL (op);
++
++ return (value & 0x7) == 0 ;
++})
++
++;; Operand is a multiple of 16 immediate
++(define_predicate "multiple_of_16_operand"
++ (match_code "const_int")
++{
++ HOST_WIDE_INT value = INTVAL (op);
++
++ return (value & 0xf) == 0 ;
++})
++
++;; Operand is a mask used for masking away upper bits of a reg
++(define_predicate "avr32_mask_upper_bits_operand"
++ (match_code "const_int")
++{
++ HOST_WIDE_INT value = INTVAL (op) + 1;
++
++ return value != 1 && value != 0 && (value & (value - 1)) == 0;
++})
++
++
++;; Operand suitable for mul instructions
++(define_predicate "avr32_mul_operand"
++ (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
++
++;; True for logical binary operators.
++(define_predicate "logical_binary_operator"
++ (match_code "ior,xor,and"))
++
++;; True for logical shift operators
++(define_predicate "logical_shift_operator"
++ (match_code "ashift,lshiftrt"))
++
++;; True for shift operand for logical and, or and eor insns
++(define_predicate "avr32_logical_shift_operand"
++ (and (match_code "ashift,lshiftrt")
++ (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
++ (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
++ (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
++ (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
++ )
++
++
++;; Predicate for second operand to and, ior and xor insn patterns
++(define_predicate "avr32_logical_insn_operand"
++ (ior (match_operand 0 "register_operand")
++ (match_operand 0 "avr32_logical_shift_operand"))
++)
++
++
++;; True for avr32 comparison operators
++(define_predicate "avr32_comparison_operator"
++ (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
++ (and (match_code "unspec")
++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
++
++(define_predicate "avr32_cond3_comparison_operator"
++ (ior (match_code "eq, ne, ge, lt, geu, ltu")
++ (and (match_code "unspec")
++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
++
++;; True for avr32 comparison operand
++(define_predicate "avr32_comparison_operand"
++ (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
++ (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
++ (and (match_code "unspec")
++ (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
++ || (XINT(op, 1) == UNSPEC_COND_PL)"))))
++
++;; True if this is a const_int with one bit set
++(define_predicate "one_bit_set_operand"
++ (match_code "const_int")
++ {
++ int i;
++ int value;
++ int ones = 0;
++
++ value = INTVAL(op);
++ for ( i = 0 ; i < 32; i++ ){
++ if ( value & ( 1 << i ) ){
++ ones++;
++ }
++ }
++
++ return ( ones == 1 );
++ })
++
++
++;; True if this is a const_int with one bit cleared
++(define_predicate "one_bit_cleared_operand"
++ (match_code "const_int")
++ {
++ int i;
++ int value;
++ int zeroes = 0;
++
++ value = INTVAL(op);
++ for ( i = 0 ; i < 32; i++ ){
++ if ( !(value & ( 1 << i )) ){
++ zeroes++;
++ }
++ }
++
++ return ( zeroes == 1 );
++ })
++
++
++;; Immediate all the low 16-bits cleared
++(define_predicate "avr32_hi16_immediate_operand"
++ (match_code "const_int")
++ {
++ /* If the low 16-bits are zero then this
++ is a hi16 immediate. */
++ return ((INTVAL(op) & 0xffff) == 0);
++ }
++)
++
++;; True if this is a register or immediate operand
++(define_predicate "register_immediate_operand"
++ (ior (match_operand 0 "register_operand")
++ (match_operand 0 "immediate_operand")))
++
++;; True if this is a register or const_int operand
++(define_predicate "register_const_int_operand"
++ (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "const_int_operand")
++ (match_operand 0 "immediate_operand"))))
++
++;; True if this is a register or const_double operand
++(define_predicate "register_const_double_operand"
++ (ior (match_operand 0 "register_operand")
++ (match_operand 0 "const_double_operand")))
++
++;; True if this is an operand containing a label_ref.
++(define_predicate "avr32_label_ref_operand"
++ (and (match_code "mem")
++ (match_test "avr32_find_symbol(op)
++ && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
++
++;; True if this is a valid symbol pointing to the constant pool.
++(define_predicate "avr32_const_pool_operand"
++ (and (match_code "symbol_ref")
++ (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
++ {
++ return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
++ || label_mentioned_p (get_pool_constant (op)))
++ || avr32_got_mentioned_p(get_pool_constant (op)))
++ : true);
++ }
++)
++
++;; True if this is a memory reference to the constant or mini pool.
++(define_predicate "avr32_const_pool_ref_operand"
++ (ior (match_operand 0 "avr32_label_ref_operand")
++ (and (match_code "mem")
++ (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
++
++
++;; Legal source operand for movti insns
++(define_predicate "avr32_movti_src_operand"
++ (ior (match_operand 0 "avr32_const_pool_ref_operand")
++ (ior (ior (match_operand 0 "register_immediate_operand")
++ (match_operand 0 "avr32_indirect_register_operand"))
++ (match_operand 0 "post_inc_memory_operand"))))
++
++;; Legal destination operand for movti insns
++(define_predicate "avr32_movti_dst_operand"
++ (ior (ior (match_operand 0 "register_operand")
++ (match_operand 0 "avr32_indirect_register_operand"))
++ (match_operand 0 "pre_dec_memory_operand")))
++
++
++;; True if this is a k12 offseted memory operand.
++(define_predicate "avr32_k12_memory_operand"
++ (and (match_code "mem")
++ (ior (match_test "REG_P(XEXP(op, 0))")
++ (match_test "GET_CODE(XEXP(op, 0)) == PLUS
++ && REG_P(XEXP(XEXP(op, 0), 0))
++ && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
++ && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)),
++ 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
++
++;; True if this is a memory operand with an immediate displacement.
++(define_predicate "avr32_imm_disp_memory_operand"
++ (and (match_code "mem")
++ (match_test "GET_CODE(XEXP(op, 0)) == PLUS
++ && REG_P(XEXP(XEXP(op, 0), 0))
++ && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
++
++;; True if this is a bswap operand.
++(define_predicate "avr32_bswap_operand"
++ (ior (match_operand 0 "avr32_k12_memory_operand")
++ (match_operand 0 "register_operand")))
++
++;; True if this is a valid coprocessor insn memory operand.
++(define_predicate "avr32_cop_memory_operand"
++ (and (match_operand 0 "memory_operand")
++ (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
++ && REG_P(XEXP(XEXP(op, 0), 0))
++ && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
++ && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)), 'K', \"Ku10\"))"))))
++
++;; True if this is a valid source/destination operand.
++;; for moving values to/from a coprocessor
++(define_predicate "avr32_cop_move_operand"
++ (ior (match_operand 0 "register_operand")
++ (match_operand 0 "avr32_cop_memory_operand")))
++
++
++;; True if this is a valid extract byte offset for use in
++;; load extracted index insns.
++(define_predicate "avr32_extract_shift_operand"
++ (and (match_operand 0 "const_int_operand")
++ (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
++ || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
++
++;; True if this is a valid avr32 symbol operand.
++(define_predicate "avr32_symbol_operand"
++ (and (match_code "label_ref, symbol_ref, const")
++ (match_test "avr32_find_symbol(op)")))
++
++;; True if this is a valid operand for the lda.w and call pseudo insns.
++(define_predicate "avr32_address_operand"
++ (and (and (match_code "label_ref, symbol_ref")
++ (match_test "avr32_find_symbol(op)"))
++ (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
++ (match_test "flag_pic")) ))
++
++;; An immediate k16 address operand
++(define_predicate "avr32_ks16_address_operand"
++ (and (match_operand 0 "address_operand")
++ (ior (match_test "REG_P(op)")
++ (match_test "GET_CODE(op) == PLUS
++ && ((GET_CODE(XEXP(op,0)) == CONST_INT)
++ || (GET_CODE(XEXP(op,1)) == CONST_INT))")) ))
++
++;; An offset k16 memory operand
++(define_predicate "avr32_ks16_memory_operand"
++ (and (match_code "mem")
++ (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
++
++;; An immediate k11 address operand
++(define_predicate "avr32_ks11_address_operand"
++ (and (match_operand 0 "address_operand")
++ (ior (match_test "REG_P(op)")
++ (match_test "GET_CODE(op) == PLUS
++ && (((GET_CODE(XEXP(op,0)) == CONST_INT)
++ && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\"))
++ || ((GET_CODE(XEXP(op,1)) == CONST_INT)
++ && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) ))
++
++;; True if this is a avr32 call operand
++(define_predicate "avr32_call_operand"
++ (ior (ior (match_operand 0 "register_operand")
++ (ior (match_operand 0 "avr32_const_pool_ref_operand")
++ (match_operand 0 "avr32_address_operand")))
++ (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
++
++;; Return true for operators performing ALU operations
++
++(define_predicate "alu_operator"
++ (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
++
++(define_predicate "avr32_add_shift_immediate_operand"
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
++
++(define_predicate "avr32_cond_register_immediate_operand"
++ (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
++
++(define_predicate "avr32_cond_immediate_operand"
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")")))
++
++
++(define_predicate "avr32_cond_move_operand"
++ (ior (ior (match_operand 0 "register_operand")
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))
++ (and (match_test "TARGET_V2_INSNS")
++ (match_operand 0 "memory_operand"))))
++
++(define_predicate "avr32_mov_immediate_operand"
++ (and (match_operand 0 "immediate_operand")
++ (match_test "avr32_const_ok_for_move(INTVAL(op))")))
++
++
++(define_predicate "avr32_rmw_address_operand"
++ (ior (and (match_code "symbol_ref")
++ (match_test "({rtx symbol = avr32_find_symbol(op); \
++ symbol && (GET_CODE (symbol) == SYMBOL_REF) && SYMBOL_REF_RMW_ADDR(symbol);})"))
++ (and (match_operand 0 "immediate_operand")
++ (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks17\")")))
++ {
++ return TARGET_RMW && !flag_pic;
++ }
++)
++
++(define_predicate "avr32_rmw_memory_operand"
++ (and (match_code "mem")
++ (match_test "!volatile_refs_p(op) && (GET_MODE(op) == SImode) &&
++ avr32_rmw_address_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")))
++
++(define_predicate "avr32_rmw_memory_or_register_operand"
++ (ior (match_operand 0 "avr32_rmw_memory_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "avr32_non_rmw_memory_operand"
++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
++ (match_operand 0 "memory_operand")))
++
++(define_predicate "avr32_non_rmw_general_operand"
++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
++ (match_operand 0 "general_operand")))
++
++(define_predicate "avr32_non_rmw_nonimmediate_operand"
++ (and (not (match_operand 0 "avr32_rmw_memory_operand"))
++ (match_operand 0 "nonimmediate_operand")))
++
++;; Return true if the operand is the 1.0f constant.
++
++(define_predicate "const_1f_operand"
++ (match_code "const_int,const_double")
++{
++ return (op == CONST1_RTX (SFmode));
++})
+--- /dev/null
++++ b/gcc/config/avr32/simd.md
+@@ -0,0 +1,145 @@
++;; AVR32 machine description file for SIMD instructions.
++;; Copyright 2003-2006 Atmel Corporation.
++;;
++;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
++;;
++;; This file is part of GCC.
++;;
++;; This program is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 2 of the License, or
++;; (at your option) any later version.
++;;
++;; This program is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with this program; if not, write to the Free Software
++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++
++;; -*- Mode: Scheme -*-
++
++
++;; Vector modes
++(define_mode_iterator VECM [V2HI V4QI])
++(define_mode_attr size [(V2HI "h") (V4QI "b")])
++
++(define_insn "add<mode>3"
++ [(set (match_operand:VECM 0 "register_operand" "=r")
++ (plus:VECM (match_operand:VECM 1 "register_operand" "r")
++ (match_operand:VECM 2 "register_operand" "r")))]
++ "TARGET_SIMD"
++ "padd.<size>\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:VECM 0 "register_operand" "=r")
++ (minus:VECM (match_operand:VECM 1 "register_operand" "r")
++ (match_operand:VECM 2 "register_operand" "r")))]
++ "TARGET_SIMD"
++ "psub.<size>\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++
++(define_insn "abs<mode>2"
++ [(set (match_operand:VECM 0 "register_operand" "=r")
++ (abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
++ "TARGET_SIMD"
++ "pabs.s<size>\t%0, %1"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "ashl<mode>3"
++ [(set (match_operand:VECM 0 "register_operand" "=r")
++ (ashift:VECM (match_operand:VECM 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "Ku04")))]
++ "TARGET_SIMD"
++ "plsl.<size>\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "ashr<mode>3"
++ [(set (match_operand:VECM 0 "register_operand" "=r")
++ (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "Ku04")))]
++ "TARGET_SIMD"
++ "pasr.<size>\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "lshr<mode>3"
++ [(set (match_operand:VECM 0 "register_operand" "=r")
++ (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "Ku04")))]
++ "TARGET_SIMD"
++ "plsr.<size>\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "smaxv2hi3"
++ [(set (match_operand:V2HI 0 "register_operand" "=r")
++ (smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
++ (match_operand:V2HI 2 "register_operand" "r")))]
++
++ "TARGET_SIMD"
++ "pmax.sh\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "sminv2hi3"
++ [(set (match_operand:V2HI 0 "register_operand" "=r")
++ (smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
++ (match_operand:V2HI 2 "register_operand" "r")))]
++
++ "TARGET_SIMD"
++ "pmin.sh\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "umaxv4qi3"
++ [(set (match_operand:V4QI 0 "register_operand" "=r")
++ (umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
++ (match_operand:V4QI 2 "register_operand" "r")))]
++
++ "TARGET_SIMD"
++ "pmax.ub\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "uminv4qi3"
++ [(set (match_operand:V4QI 0 "register_operand" "=r")
++ (umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
++ (match_operand:V4QI 2 "register_operand" "r")))]
++
++ "TARGET_SIMD"
++ "pmin.ub\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++
++(define_insn "addsubv2hi"
++ [(set (match_operand:V2HI 0 "register_operand" "=r")
++ (vec_concat:V2HI
++ (plus:HI (match_operand:HI 1 "register_operand" "r")
++ (match_operand:HI 2 "register_operand" "r"))
++ (minus:HI (match_dup 1) (match_dup 2))))]
++ "TARGET_SIMD"
++ "paddsub.h\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
++
++(define_insn "subaddv2hi"
++ [(set (match_operand:V2HI 0 "register_operand" "=r")
++ (vec_concat:V2HI
++ (minus:HI (match_operand:HI 1 "register_operand" "r")
++ (match_operand:HI 2 "register_operand" "r"))
++ (plus:HI (match_dup 1) (match_dup 2))))]
++ "TARGET_SIMD"
++ "psubadd.h\t%0, %1:b, %2:b"
++ [(set_attr "length" "4")
++ (set_attr "type" "alu")])
+--- /dev/null
++++ b/gcc/config/avr32/sync.md
+@@ -0,0 +1,244 @@
++;;=================================================================
++;; Atomic operations
++;;=================================================================
++
++
++(define_insn "sync_compare_and_swapsi"
++ [(set (match_operand:SI 0 "register_operand" "=&r,&r")
++ (match_operand:SI 1 "memory_operand" "+RKs16,+RKs16"))
++ (set (match_dup 1)
++ (unspec_volatile:SI
++ [(match_dup 1)
++ (match_operand:SI 2 "register_immediate_operand" "r,Ks21")
++ (match_operand:SI 3 "register_operand" "r,r")]
++ VUNSPEC_SYNC_CMPXCHG)) ]
++ ""
++ "0:
++ ssrf\t5
++ ld.w\t%0,%1
++ cp.w\t%0,%2
++ brne\t0f
++ stcond\t%1, %3
++ brne\t0b
++ 0:
++ "
++ [(set_attr "length" "16,18")
++ (set_attr "cc" "clobber")]
++ )
++
++
++(define_code_iterator atomic_op [plus minus and ior xor])
++(define_code_attr atomic_asm_insn [(plus "add") (minus "sub") (and "and") (ior "or") (xor "eor")])
++(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")])
++
++(define_insn "sync_loadsi"
++ ; NB! Put an early clobber on the destination operand to
++ ; avoid gcc using the same register in the source and
++ ; destination. This is done in order to avoid gcc to
++ ; clobber the source operand since these instructions
++ ; are actually inside a "loop".
++ [(set (match_operand:SI 0 "register_operand" "=&r")
++ (unspec_volatile:SI
++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16")
++ (label_ref (match_operand 2 "" ""))]
++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )]
++ ""
++ "%2:
++ ssrf\t5
++ ld.w\t%0,%1"
++ [(set_attr "length" "6")
++ (set_attr "cc" "clobber")]
++ )
++
++(define_insn "sync_store_if_lock"
++ [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16")
++ (unspec_volatile:SI
++ [(match_operand:SI 1 "register_operand" "r")
++ (label_ref (match_operand 2 "" ""))]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )]
++ ""
++ "stcond\t%0, %1
++ brne\t%2"
++ [(set_attr "length" "6")
++ (set_attr "cc" "clobber")]
++ )
++
++
++(define_expand "sync_<atomic_insn>si"
++ [(set (match_dup 2)
++ (unspec_volatile:SI
++ [(match_operand:SI 0 "avr32_ks16_memory_operand" "")
++ (match_dup 3)]
++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
++ (set (match_dup 2)
++ (atomic_op:SI (match_dup 2)
++ (match_operand:SI 1 "register_immediate_operand" "")))
++ (set (match_dup 0)
++ (unspec_volatile:SI
++ [(match_dup 2)
++ (match_dup 3)]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )
++ (use (match_dup 1))
++ (use (match_dup 4))]
++ ""
++ {
++ rtx *mem_expr = &operands[0];
++ rtx ptr_reg;
++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
++ {
++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
++ XEXP (*mem_expr, 0) = ptr_reg;
++ }
++ else
++ {
++ rtx address = XEXP (*mem_expr, 0);
++ if ( REG_P (address) )
++ ptr_reg = address;
++ else if ( REG_P (XEXP (address, 0)) )
++ ptr_reg = XEXP (address, 0);
++ else
++ ptr_reg = XEXP (address, 1);
++ }
++
++ operands[2] = gen_reg_rtx (SImode);
++ operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
++ operands[4] = ptr_reg;
++
++ }
++ )
++
++
++
++(define_expand "sync_old_<atomic_insn>si"
++ [(set (match_operand:SI 0 "register_operand" "")
++ (unspec_volatile:SI
++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
++ (match_dup 4)]
++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
++ (set (match_dup 3)
++ (atomic_op:SI (match_dup 0)
++ (match_operand:SI 2 "register_immediate_operand" "")))
++ (set (match_dup 1)
++ (unspec_volatile:SI
++ [(match_dup 3)
++ (match_dup 4)]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )
++ (use (match_dup 2))
++ (use (match_dup 5))]
++ ""
++ {
++ rtx *mem_expr = &operands[1];
++ rtx ptr_reg;
++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
++ {
++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
++ XEXP (*mem_expr, 0) = ptr_reg;
++ }
++ else
++ {
++ rtx address = XEXP (*mem_expr, 0);
++ if ( REG_P (address) )
++ ptr_reg = address;
++ else if ( REG_P (XEXP (address, 0)) )
++ ptr_reg = XEXP (address, 0);
++ else
++ ptr_reg = XEXP (address, 1);
++ }
++
++ operands[3] = gen_reg_rtx (SImode);
++ operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
++ operands[5] = ptr_reg;
++ }
++ )
++
++(define_expand "sync_new_<atomic_insn>si"
++ [(set (match_operand:SI 0 "register_operand" "")
++ (unspec_volatile:SI
++ [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
++ (match_dup 3)]
++ VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
++ (set (match_dup 0)
++ (atomic_op:SI (match_dup 0)
++ (match_operand:SI 2 "register_immediate_operand" "")))
++ (set (match_dup 1)
++ (unspec_volatile:SI
++ [(match_dup 0)
++ (match_dup 3)]
++ VUNSPEC_SYNC_STORE_IF_LOCK) )
++ (use (match_dup 2))
++ (use (match_dup 4))]
++ ""
++ {
++ rtx *mem_expr = &operands[1];
++ rtx ptr_reg;
++ if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
++ {
++ ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
++ XEXP (*mem_expr, 0) = ptr_reg;
++ }
++ else
++ {
++ rtx address = XEXP (*mem_expr, 0);
++ if ( REG_P (address) )
++ ptr_reg = address;
++ else if ( REG_P (XEXP (address, 0)) )
++ ptr_reg = XEXP (address, 0);
++ else
++ ptr_reg = XEXP (address, 1);
++ }
++
++ operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
++ operands[4] = ptr_reg;
++ }
++ )
++
++
++;(define_insn "sync_<atomic_insn>si"
++; [(set (match_operand:SI 0 "memory_operand" "+RKs16")
++; (unspec_volatile:SI
++; [(atomic_op:SI (match_dup 0)
++; (match_operand:SI 1 "register_operand" "r"))]
++; VUNSPEC_SYNC_CMPXCHG))
++; (clobber (match_scratch:SI 2 "=&r"))]
++; ""
++; "0:
++; ssrf\t5
++; ld.w\t%2,%0
++; <atomic_asm_insn>\t%2,%1
++; stcond\t%0, %2
++; brne\t0b
++; "
++; [(set_attr "length" "14")
++; (set_attr "cc" "clobber")]
++; )
++;
++;(define_insn "sync_new_<atomic_insn>si"
++; [(set (match_operand:SI 1 "memory_operand" "+RKs16")
++; (unspec_volatile:SI
++; [(atomic_op:SI (match_dup 1)
++; (match_operand:SI 2 "register_operand" "r"))]
++; VUNSPEC_SYNC_CMPXCHG))
++; (set (match_operand:SI 0 "register_operand" "=&r")
++; (atomic_op:SI (match_dup 1)
++; (match_dup 2)))]
++; ""
++; "0:
++; ssrf\t5
++; ld.w\t%0,%1
++; <atomic_asm_insn>\t%0,%2
++; stcond\t%1, %0
++; brne\t0b
++; "
++; [(set_attr "length" "14")
++; (set_attr "cc" "clobber")]
++; )
++
++(define_insn "sync_lock_test_and_setsi"
++ [ (set (match_operand:SI 0 "register_operand" "=&r")
++ (match_operand:SI 1 "memory_operand" "+RKu00"))
++ (set (match_dup 1)
++ (match_operand:SI 2 "register_operand" "r")) ]
++ ""
++ "xchg\t%0, %p1, %2"
++ [(set_attr "length" "4")]
++ )
+--- /dev/null
++++ b/gcc/config/avr32/t-avr32
+@@ -0,0 +1,118 @@
++
++MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
++ $(srcdir)/config/avr32/sync.md \
++ $(srcdir)/config/avr32/simd.md \
++ $(srcdir)/config/avr32/predicates.md
++
++s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
++ s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
++
++# We want fine grained libraries, so use the new code
++# to build the floating point emulation libraries.
++FPBIT = fp-bit.c
++DPBIT = dp-bit.c
++
++LIB1ASMSRC = avr32/lib1funcs.S
++LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
++ _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
++ _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
++ _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
++ _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
++ _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
++ _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
++
++#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
++
++MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3/march=ucr3fp
++MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3 ucr3fp
++MULTILIB_EXCEPTIONS =
++MULTILIB_MATCHES += march?ap=mpart?ap7000
++MULTILIB_MATCHES += march?ap=mpart?ap7001
++MULTILIB_MATCHES += march?ap=mpart?ap7002
++MULTILIB_MATCHES += march?ap=mpart?ap7200
++MULTILIB_MATCHES += march?ucr1=march?uc
++MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
++MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
++MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a464
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a464s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256s
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
++MULTILIB_MATCHES += march?ucr3=mpart?uc64d3
++MULTILIB_MATCHES += march?ucr3=mpart?uc128d3
++MULTILIB_MATCHES += march?ucr3=mpart?uc64d4
++MULTILIB_MATCHES += march?ucr3=mpart?uc128d4
++MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512crevc
++MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512crevc
++MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512crevc
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l0256
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l0128
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l064revb
++MULTILIB_MATCHES += march?ucr3=mpart?uc64l3u
++MULTILIB_MATCHES += march?ucr3=mpart?uc128l3u
++MULTILIB_MATCHES += march?ucr3=mpart?uc256l3u
++MULTILIB_MATCHES += march?ucr3=mpart?uc64l4u
++MULTILIB_MATCHES += march?ucr3=mpart?uc128l4u
++MULTILIB_MATCHES += march?ucr3=mpart?uc256l4u
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c064c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0128c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0256c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0512c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c164c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1128c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1256c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1512c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c264c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2128c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2256c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2512c
++MULTILIB_MATCHES += march?ucr3=mpart?mxt768e
++
++
++EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
++
++CRTSTUFF_T_CFLAGS = -mrelax
++CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
++TARGET_LIBGCC2_CFLAGS += -mrelax
++
++LIBGCC = stmp-multilib
++INSTALL_LIBGCC = install-multilib
++
++fp-bit.c: $(srcdir)/config/fp-bit.c
++ echo '#define FLOAT' > fp-bit.c
++ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
++
++dp-bit.c: $(srcdir)/config/fp-bit.c
++ cat $(srcdir)/config/fp-bit.c > dp-bit.c
++
++
++
+--- /dev/null
++++ b/gcc/config/avr32/t-avr32-linux
+@@ -0,0 +1,118 @@
++
++MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
++ $(srcdir)/config/avr32/sync.md \
++ $(srcdir)/config/avr32/simd.md \
++ $(srcdir)/config/avr32/predicates.md
++
++s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
++ s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
++
++# We want fine grained libraries, so use the new code
++# to build the floating point emulation libraries.
++FPBIT = fp-bit.c
++DPBIT = dp-bit.c
++
++LIB1ASMSRC = avr32/lib1funcs.S
++LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
++ _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
++ _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
++ _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
++ _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
++ _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
++ _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
++
++#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
++
++MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3/march=ucr3fp
++MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3 ucr3fp
++MULTILIB_EXCEPTIONS =
++MULTILIB_MATCHES += march?ap=mpart?ap7000
++MULTILIB_MATCHES += march?ap=mpart?ap7001
++MULTILIB_MATCHES += march?ap=mpart?ap7002
++MULTILIB_MATCHES += march?ap=mpart?ap7200
++MULTILIB_MATCHES += march?ucr1=march?uc
++MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
++MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
++MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a464
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a464s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128s
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256s
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
++MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512
++MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
++MULTILIB_MATCHES += march?ucr3=mpart?uc64d3
++MULTILIB_MATCHES += march?ucr3=mpart?uc128d3
++MULTILIB_MATCHES += march?ucr3=mpart?uc64d4
++MULTILIB_MATCHES += march?ucr3=mpart?uc128d4
++MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512crevc
++MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512crevc
++MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512crevc
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l0256
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l0128
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
++MULTILIB_MATCHES += march?ucr3=mpart?uc3l064revb
++MULTILIB_MATCHES += march?ucr3=mpart?uc64l3u
++MULTILIB_MATCHES += march?ucr3=mpart?uc128l3u
++MULTILIB_MATCHES += march?ucr3=mpart?uc256l3u
++MULTILIB_MATCHES += march?ucr3=mpart?uc64l4u
++MULTILIB_MATCHES += march?ucr3=mpart?uc128l4u
++MULTILIB_MATCHES += march?ucr3=mpart?uc256l4u
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c064c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0128c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0256c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0512c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c164c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1128c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1256c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1512c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c264c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2128c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2256c
++MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2512c
++MULTILIB_MATCHES += march?ucr3=mpart?mxt768e
++
++
++EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o
++
++CRTSTUFF_T_CFLAGS = -mrelax
++CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
++TARGET_LIBGCC2_CFLAGS += -mrelax
++
++LIBGCC = stmp-multilib
++INSTALL_LIBGCC = install-multilib
++
++fp-bit.c: $(srcdir)/config/fp-bit.c
++ echo '#define FLOAT' > fp-bit.c
++ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
++
++dp-bit.c: $(srcdir)/config/fp-bit.c
++ cat $(srcdir)/config/fp-bit.c > dp-bit.c
++
++
++
+--- /dev/null
++++ b/gcc/config/avr32/t-elf
+@@ -0,0 +1,16 @@
++
++# Assemble startup files.
++$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
++ $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
++ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
++
++$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
++ $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
++ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
++
++
++# Build the libraries for both hard and soft floating point
++EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
++
++LIBGCC = stmp-multilib
++INSTALL_LIBGCC = install-multilib
+--- /dev/null
++++ b/gcc/config/avr32/uc3fpu.md
+@@ -0,0 +1,199 @@
++;; AVR32 machine description file for Floating-Point instructions.
++;; Copyright 2003-2006 Atmel Corporation.
++;;
++;;
++;; This file is part of GCC.
++;;
++;; This program is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 2 of the License, or
++;; (at your option) any later version.
++;;
++;; This program is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with this program; if not, write to the Free Software
++;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++
++(define_insn "*movsf_uc3fp"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,m")
++ (match_operand:SF 1 "general_operand" "r,G,m,r"))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "@
++ mov\t%0, %1
++ mov\t%0, %1
++ ld.w\t%0, %1
++ st.w\t%0, %1"
++ [(set_attr "length" "2,4,4,4")
++ (set_attr "type" "alu,alu,load,store")])
++
++(define_insn "mulsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (mult:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fmul.s\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "nmulsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r"))))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fnmul.s\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "macsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r"))
++ (match_operand:SF 3 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fmac.s\t%0, %3, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++;(define_insn "nmacsf3"
++; [(set (match_operand:SF 0 "register_operand" "=r")
++; (plus:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
++; (mult:SF(match_operand:SF 2 "register_operand" "r")
++; (match_operand:SF 3 "register_operand" "r"))))]
++; "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++; "fnmac.s\t%0, %1, %2, %3"
++; [(set_attr "length" "4")
++; (set_attr "type" "fmul")])
++
++(define_insn "nmacsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (minus:SF (mult:SF (match_operand:SF 2 "register_operand" "r")
++ (match_operand:SF 3 "register_operand" "r"))
++ (match_operand:SF 1 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fnmac.s\t%0, %1, %2, %3"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "msubacsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (minus:SF (match_operand:SF 3 "register_operand" "r")
++ (mult:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r"))))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fmsc.s\t%0, %3, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "nmsubacsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))
++ (match_operand:SF 3 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fnmsc.s\t%0, %3, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "addsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (plus:SF (match_operand:SF 1 "register_operand" "%r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fadd.s\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "subsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (minus:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fsub.s\t%0, %1, %2"
++ [(set_attr "length" "4")
++ (set_attr "type" "fmul")])
++
++(define_insn "fixuns_truncsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fcastrs.uw\t%0, %1"
++ [(set_attr "length" "4")])
++
++(define_insn "fix_truncsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:SF 1 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fcastrs.sw\t%0, %1"
++ [(set_attr "length" "4")])
++
++(define_insn "floatunssisf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unsigned_float:SF (match_operand:SI 1 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fcastuw.s\t%0, %1"
++ [(set_attr "length" "4")])
++
++(define_insn "floatsisf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (float:SF (match_operand:SI 1 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "fcastsw.s\t%0, %1"
++ [(set_attr "length" "4")])
++
++(define_insn "cmpsf_internal_uc3fp"
++ [(set (cc0)
++ (compare:CC
++ (match_operand:SF 0 "register_operand" "r")
++ (match_operand:SF 1 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ {
++ avr32_branch_type = CMP_SF;
++ if (!rtx_equal_p(cc_prev_status.mdep.value, SET_SRC(PATTERN (insn))) )
++ return "fcmp.s\t%0, %1";
++ return "";
++ }
++ [(set_attr "length" "4")
++ (set_attr "cc" "compare")])
++
++(define_expand "divsf3"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (div:SF (match_operand:SF 1 "register_operand" "r")
++ (match_operand:SF 2 "register_operand" "r")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT && flag_unsafe_math_optimizations"
++ "{
++ emit_insn(gen_frcpa_internal(operands[0],operands[2]));
++ emit_insn(gen_mulsf3(operands[0],operands[0],operands[1]));
++ DONE;
++ }"
++)
++
++(define_insn "frcpa_internal"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FRCPA))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "frcpa.s %0,%1"
++ [(set_attr "length" "4")])
++
++(define_expand "sqrtsf2"
++ [(set (match_operand:SF 0 "register_operand" "")
++ (sqrt:SF (match_operand:SF 1 "register_operand" "")))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT && flag_unsafe_math_optimizations"
++ "
++{
++ rtx scratch = gen_reg_rtx (SFmode);
++ emit_insn (gen_rsqrtsf2 (scratch, operands[1], CONST1_RTX (SFmode)));
++ emit_insn (gen_divsf3(operands[0], force_reg (SFmode, CONST1_RTX (SFmode)),
++ scratch));
++ DONE;
++}")
++
++(define_insn "rsqrtsf2"
++ [(set (match_operand:SF 0 "register_operand" "=r")
++ (div:SF (match_operand:SF 2 "const_1f_operand" "F")
++ (sqrt:SF (match_operand:SF 1 "register_operand" "?r"))))]
++ "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
++ "frsqrta.s %1, %0")
+--- /dev/null
++++ b/gcc/config/avr32/uclinux-elf.h
+@@ -0,0 +1,20 @@
++
++/* Run-time Target Specification. */
++#undef TARGET_VERSION
++#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr)
++
++/* We don't want a .jcr section on uClinux. As if this makes a difference... */
++#define TARGET_USE_JCR_SECTION 0
++
++/* Here we go. Drop the crtbegin/crtend stuff completely. */
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC \
++ "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \
++ " %{!p:%{profile:gcrt1.o%s}" \
++ " %{!profile:crt1.o%s}}}} crti.o%s"
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC "crtn.o%s"
++
++#undef TARGET_DEFAULT
++#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
+--- a/gcc/config/host-linux.c
++++ b/gcc/config/host-linux.c
+@@ -25,6 +25,9 @@
+ #include "hosthooks.h"
+ #include "hosthooks-def.h"
+
++#ifndef SSIZE_MAX
++#define SSIZE_MAX LONG_MAX
++#endif
+
+ /* Linux has a feature called exec-shield-randomize that perturbs the
+ address of non-fixed mapped segments by a (relatively) small amount.
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -810,6 +810,24 @@ avr-*-rtems*)
+ avr-*-*)
+ tm_file="avr/avr.h dbxelf.h"
+ ;;
++avr32*-*-linux*)
++ tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
++ tmake_file="t-linux avr32/t-avr32-linux"
++ extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
++ extra_modes=avr32/avr32-modes.def
++ gnu_ld=yes
++ ;;
++avr32*-*-uclinux*)
++ tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
++ tmake_file="t-linux avr32/t-avr32-linux"
++ extra_modes=avr32/avr32-modes.def
++ gnu_ld=yes
++ ;;
++avr32-*-*)
++ tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
++ tmake_file="avr32/t-avr32 avr32/t-elf"
++ extra_modes=avr32/avr32-modes.def
++ ;;
+ bfin*-elf*)
+ tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
+ tmake_file=bfin/t-bfin-elf
+@@ -2764,6 +2782,32 @@ case "${target}" in
+ fi
+ ;;
+
++ avr32*-*-*)
++ supported_defaults="part arch"
++
++ case "$with_part" in
++ "" \
++ | "ap7000" | "ap7010" | "ap7020" | "uc3a0256" | "uc3a0512" | "uc3a1128" | "uc3a1256" | "uc3a1512" )
++ # OK
++ ;;
++ *)
++ echo "Unknown part used in --with-part=$with_part" 1>&2
++ exit 1
++ ;;
++ esac
++
++ case "$with_arch" in
++ "" \
++ | "ap" | "uc")
++ # OK
++ ;;
++ *)
++ echo "Unknown arch used in --with-arch=$with_arch" 1>&2
++ exit 1
++ ;;
++ esac
++ ;;
++
+ fr*-*-*linux*)
+ supported_defaults=cpu
+ case "$with_cpu" in
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -2240,10 +2240,9 @@ L2:],
+ as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q`
+ if echo "$as_ver" | grep GNU > /dev/null; then
+ changequote(,)dnl
+- as_vers=`echo $as_ver | sed -n \
+- -e 's,^.*[ ]\([0-9][0-9]*\.[0-9][0-9]*.*\)$,\1,p'`
+- as_major=`expr "$as_vers" : '\([0-9]*\)'`
+- as_minor=`expr "$as_vers" : '[0-9]*\.\([0-9]*\)'`
++ as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'`
++ as_major=`echo $as_ver | sed 's/\..*//'`
++ as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'`
+ changequote([,])dnl
+ if test $as_major -eq 2 && test $as_minor -lt 11
+ then :
+@@ -3308,7 +3307,7 @@ case "$target" in
+ i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
+ | x86_64*-*-* | hppa*-*-* | arm*-*-* \
+ | xstormy16*-*-* | cris-*-* | crisv32-*-* | xtensa*-*-* | bfin-*-* | score*-*-* \
+- | spu-*-* | fido*-*-* | m32c-*-*)
++ | spu-*-* | fido*-*-* | m32c-*-* | avr32-*-*)
+ insn="nop"
+ ;;
+ ia64*-*-* | s390*-*-*)
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -2397,7 +2397,7 @@ This attribute is ignored for R8C target
+
+ @item interrupt
+ @cindex interrupt handler functions
+-Use this attribute on the ARM, AVR, CRX, M32C, M32R/D, m68k,
++Use this attribute on the ARM, AVR, AVR32, CRX, M32C, M32R/D, m68k,
+ and Xstormy16 ports to indicate that the specified function is an
+ interrupt handler. The compiler will generate function entry and exit
+ sequences suitable for use in an interrupt handler when this attribute
+@@ -2417,6 +2417,15 @@ void f () __attribute__ ((interrupt ("IR
+
+ Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
+
++Note, for the AVR32, you can specify which banking scheme is used for
++the interrupt mode this interrupt handler is used in like this:
++
++@smallexample
++void f () __attribute__ ((interrupt ("FULL")));
++@end smallexample
++
++Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
++
+ On ARMv7-M the interrupt type is ignored, and the attribute means the function
+ may be called with a word aligned stack pointer.
+
+@@ -4188,6 +4197,23 @@ placed in either the @code{.bss_below100
+
+ @end table
+
++@subsection AVR32 Variable Attributes
++
++One attribute is currently defined for AVR32 configurations:
++@code{rmw_addressable}
++
++@table @code
++@item rmw_addressable
++@cindex @code{rmw_addressable} attribute
++
++This attribute can be used to signal that a variable can be accessed
++with the addressing mode of the AVR32 Atomic Read-Modify-Write memory
++instructions and hence make it possible for gcc to generate these
++instructions without using built-in functions or inline assembly statements.
++Variables used within the AVR32 Atomic Read-Modify-Write built-in
++functions will automatically get the @code{rmw_addressable} attribute.
++@end table
++
+ @subsection AVR Variable Attributes
+
+ @table @code
+@@ -7042,6 +7068,7 @@ instructions, but allow the compiler to
+ * Alpha Built-in Functions::
+ * ARM iWMMXt Built-in Functions::
+ * ARM NEON Intrinsics::
++* AVR32 Built-in Functions::
+ * Blackfin Built-in Functions::
+ * FR-V Built-in Functions::
+ * X86 Built-in Functions::
+@@ -7284,6 +7311,7 @@ long long __builtin_arm_wxor (long long,
+ long long __builtin_arm_wzero ()
+ @end smallexample
+
++
+ @node ARM NEON Intrinsics
+ @subsection ARM NEON Intrinsics
+
+@@ -7292,6 +7320,74 @@ when the @option{-mfpu=neon} switch is u
+
+ @include arm-neon-intrinsics.texi
+
++@node AVR32 Built-in Functions
++@subsection AVR32 Built-in Functions
++
++Built-in functions for atomic memory (RMW) instructions. Note that these
++built-ins will fail for targets where the RMW instructions are not
++implemented. Also note that these instructions only that a Ks15 << 2
++memory address and will therefor not work with any runtime computed
++memory addresses. The user is responsible for making sure that any
++pointers used within these functions points to a valid memory address.
++
++@smallexample
++void __builtin_mems(int */*ptr*/, int /*bit*/)
++void __builtin_memc(int */*ptr*/, int /*bit*/)
++void __builtin_memt(int */*ptr*/, int /*bit*/)
++@end smallexample
++
++Built-in functions for DSP instructions. Note that these built-ins will
++fail for targets where the DSP instructions are not implemented.
++
++@smallexample
++int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
++int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
++int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
++int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
++short __builtin_mulsathh_h (short, short)
++int __builtin_mulsathh_w (short, short)
++short __builtin_mulsatrndhh_h (short, short)
++int __builtin_mulsatrndwh_w (int, short)
++int __builtin_mulsatwh_w (int, short)
++int __builtin_macsathh_w (int, short, short)
++short __builtin_satadd_h (short, short)
++short __builtin_satsub_h (short, short)
++int __builtin_satadd_w (int, int)
++int __builtin_satsub_w (int, int)
++long long __builtin_mulwh_d(int, short)
++long long __builtin_mulnwh_d(int, short)
++long long __builtin_macwh_d(long long, int, short)
++long long __builtin_machh_d(long long, short, short)
++@end smallexample
++
++Other built-in functions for instructions that cannot easily be
++generated by the compiler.
++
++@smallexample
++void __builtin_ssrf(int);
++void __builtin_csrf(int);
++void __builtin_musfr(int);
++int __builtin_mustr(void);
++int __builtin_mfsr(int /*Status Register Address*/)
++void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
++int __builtin_mfdr(int /*Debug Register Address*/)
++void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
++void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
++void __builtin_sync(int /*Sync Operation*/)
++void __builtin_tlbr(void)
++void __builtin_tlbs(void)
++void __builtin_tlbw(void)
++void __builtin_breakpoint(void)
++int __builtin_xchg(void * /*Address*/, int /*Value*/ )
++short __builtin_bswap_16(short)
++int __builtin_bswap_32(int)
++void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
++int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
++void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
++long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
++void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
++@end smallexample
++
+ @node Blackfin Built-in Functions
+ @subsection Blackfin Built-in Functions
+
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -195,7 +195,7 @@ in the following sections.
+ -fvisibility-ms-compat @gol
+ -Wabi -Wctor-dtor-privacy @gol
+ -Wnon-virtual-dtor -Wreorder @gol
+--Weffc++ -Wstrict-null-sentinel @gol
++-Weffc++ -Wno-deprecated @gol
+ -Wno-non-template-friend -Wold-style-cast @gol
+ -Woverloaded-virtual -Wno-pmf-conversions @gol
+ -Wsign-promo}
+@@ -641,6 +641,12 @@ Objective-C and Objective-C++ Dialects}.
+ -mauto-incdec -minmax -mlong-calls -mshort @gol
+ -msoft-reg-count=@var{count}}
+
++@emph{AVR32 Options}
++@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
++-mforce-double-align -mno-init-got -mrelax -mmd-reorg-opt -masm-addr-pseudos @gol
++-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol
++-mfast-float -mimm-in-const-pool}
++
+ @emph{MCore Options}
+ @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
+ -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
+@@ -3256,13 +3262,11 @@ appears in a class without constructors.
+ If you want to warn about code which uses the uninitialized value of the
+ variable in its own initializer, use the @option{-Winit-self} option.
+
+-These warnings occur for individual uninitialized or clobbered
+-elements of structure, union or array variables as well as for
+-variables which are uninitialized or clobbered as a whole. They do
+-not occur for variables or elements declared @code{volatile}. Because
+-these warnings depend on optimization, the exact variables or elements
+-for which there are warnings will depend on the precise optimization
+-options and version of GCC used.
++These warnings occur only for variables that are candidates for
++register allocation. Therefore, they do not occur for a variable that
++is declared @code{volatile}, or whose address is taken, or whose size
++is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
++structures, unions or arrays, even when they are in registers.
+
+ Note that there may be no warning about a variable that is used only
+ to compute a value that itself is never used, because such
+@@ -7461,10 +7465,6 @@ If number of candidates in the set is sm
+ we always try to remove unnecessary ivs from the set during its
+ optimization when a new iv is added to the set.
+
+-@item scev-max-expr-size
+-Bound on size of expressions used in the scalar evolutions analyzer.
+-Large expressions slow the analyzer.
+-
+ @item omega-max-vars
+ The maximum number of variables in an Omega constraint system.
+ The default value is 128.
+@@ -8860,6 +8860,7 @@ platform.
+ * ARC Options::
+ * ARM Options::
+ * AVR Options::
++* AVR32 Options::
+ * Blackfin Options::
+ * CRIS Options::
+ * CRX Options::
+@@ -9348,6 +9349,145 @@ comply to the C standards, but it will p
+ size.
+ @end table
+
++@node AVR32 Options
++@subsection AVR32 Options
++@cindex AVR32 Options
++
++These options are defined for AVR32 implementations:
++
++@table @gcctabopt
++@item -muse-rodata-section
++@opindex muse-rodata-section
++Use section @samp{.rodata} for read-only data instead of @samp{.text}.
++
++@item -mhard-float
++@opindex mhard-float
++Use floating point coprocessor instructions.
++
++@item -msoft-float
++@opindex msoft-float
++Use software floating-point library for floating-point operations.
++
++@item -mforce-double-align
++@opindex mforce-double-align
++Force double-word alignment for double-word memory accesses.
++
++@item -masm-addr-pseudos
++@opindex masm-addr-pseudos
++Use assembler pseudo-instructions lda.w and call for handling direct
++addresses. (Enabled by default)
++
++@item -mno-init-got
++@opindex mno-init-got
++Do not initialize the GOT register before using it when compiling PIC
++code.
++
++@item -mrelax
++@opindex mrelax
++Let invoked assembler and linker do relaxing
++(Enabled by default when optimization level is >1).
++This means that when the address of symbols are known at link time,
++the linker can optimize @samp{icall} and @samp{mcall}
++instructions into a @samp{rcall} instruction if possible.
++Loading the address of a symbol can also be optimized.
++
++@item -mmd-reorg-opt
++@opindex mmd-reorg-opt
++Perform machine dependent optimizations in reorg stage.
++
++@item -mpart=@var{part}
++@opindex mpart
++Generate code for the specified part. Permissible parts are:
++@samp{ap7000},
++@samp{ap7001},
++@samp{ap7002},
++@samp{ap7200},
++@samp{uc3a0128},
++@samp{uc3a0256},
++@samp{uc3a0512},
++@samp{uc3a0512es},
++@samp{uc3a1128},
++@samp{uc3a1256},
++@samp{uc3a1512},
++@samp{uc3a1512es},
++@samp{uc3a3revd},
++@samp{uc3a364},
++@samp{uc3a364s},
++@samp{uc3a3128},
++@samp{uc3a3128s},
++@samp{uc3a3256},
++@samp{uc3a3256s},
++@samp{uc3a464},
++@samp{uc3a464s},
++@samp{uc3a4128},
++@samp{uc3a4128s},
++@samp{uc3a4256},
++@samp{uc3a4256s},
++@samp{uc3b064},
++@samp{uc3b0128},
++@samp{uc3b0256},
++@samp{uc3b0256es},
++@samp{uc3b0512},
++@samp{uc3b0512revc},
++@samp{uc3b164},
++@samp{uc3b1128},
++@samp{uc3b1256},
++@samp{uc3b1256es},
++@samp{uc3b1512},
++@samp{uc3b1512revc}
++@samp{uc64d3},
++@samp{uc128d3},
++@samp{uc64d4},
++@samp{uc128d4},
++@samp{uc3c0512crevc},
++@samp{uc3c1512crevc},
++@samp{uc3c2512crevc},
++@samp{uc3l0256},
++@samp{uc3l0128},
++@samp{uc3l064},
++@samp{uc3l032},
++@samp{uc3l016},
++@samp{uc3l064revb},
++@samp{uc64l3u},
++@samp{uc128l3u},
++@samp{uc256l3u},
++@samp{uc64l4u},
++@samp{uc128l4u},
++@samp{uc256l4u},
++@samp{uc3c064c},
++@samp{uc3c0128c},
++@samp{uc3c0256c},
++@samp{uc3c0512c},
++@samp{uc3c164c},
++@samp{uc3c1128c},
++@samp{uc3c1256c},
++@samp{uc3c1512c},
++@samp{uc3c264c},
++@samp{uc3c2128c},
++@samp{uc3c2256c},
++@samp{uc3c2512c},
++@samp{mxt768e}.
++
++@item -mcpu=@var{cpu-type}
++@opindex mcpu
++Same as -mpart. Obsolete.
++
++@item -march=@var{arch}
++@opindex march
++Generate code for the specified architecture. Permissible architectures are:
++@samp{ap}, @samp{uc} and @samp{ucr2}.
++
++@item -mfast-float
++@opindex mfast-float
++Enable fast floating-point library that does not conform to IEEE-754 but is still good enough
++for most applications. The fast floating-point library does not round to the nearest even
++but away from zero. Enabled by default if the -funsafe-math-optimizations switch is specified.
++
++@item -mimm-in-const-pool
++@opindex mimm-in-const-pool
++Put large immediates in constant pool. This is enabled by default for archs with insn-cache.
++@end table
++
+ @node Blackfin Options
+ @subsection Blackfin Options
+ @cindex Blackfin Options
+@@ -9403,29 +9543,12 @@ When enabled, the compiler will ensure t
+ contain speculative loads after jump instructions. If this option is used,
+ @code{__WORKAROUND_SPECULATIVE_LOADS} is defined.
+
+-@item -mno-specld-anomaly
+-@opindex mno-specld-anomaly
+-Don't generate extra code to prevent speculative loads from occurring.
+-
+ @item -mcsync-anomaly
+ @opindex mcsync-anomaly
+ When enabled, the compiler will ensure that the generated code does not
+ contain CSYNC or SSYNC instructions too soon after conditional branches.
+ If this option is used, @code{__WORKAROUND_SPECULATIVE_SYNCS} is defined.
+
+-@item -mno-csync-anomaly
+-@opindex mno-csync-anomaly
+-Don't generate extra code to prevent CSYNC or SSYNC instructions from
+-occurring too soon after a conditional branch.
+-
+-@item -mlow-64k
+-@opindex mlow-64k
+-When enabled, the compiler is free to take advantage of the knowledge that
+-the entire program fits into the low 64k of memory.
+-
+-@item -mno-low-64k
+-@opindex mno-low-64k
+-Assume that the program is arbitrarily large. This is the default.
+
+ @item -mstack-check-l1
+ @opindex mstack-check-l1
+@@ -9439,11 +9562,6 @@ This allows for execute in place and sha
+ without virtual memory management. This option implies @option{-fPIC}.
+ With a @samp{bfin-elf} target, this option implies @option{-msim}.
+
+-@item -mno-id-shared-library
+-@opindex mno-id-shared-library
+-Generate code that doesn't assume ID based shared libraries are being used.
+-This is the default.
+-
+ @item -mleaf-id-shared-library
+ @opindex mleaf-id-shared-library
+ Generate code that supports shared libraries via the library ID method,
+@@ -9485,11 +9603,6 @@ call on this register. This switch is n
+ will lie outside of the 24 bit addressing range of the offset based
+ version of subroutine call instruction.
+
+-This feature is not enabled by default. Specifying
+-@option{-mno-long-calls} will restore the default behavior. Note these
+-switches have no effect on how the compiler generates code to handle
+-function calls via function pointers.
+-
+ @item -mfast-fp
+ @opindex mfast-fp
+ Link with the fast floating-point library. This library relaxes some of
+--- a/gcc/doc/md.texi
++++ b/gcc/doc/md.texi
+@@ -4,6 +4,7 @@
+ @c This is part of the GCC manual.
+ @c For copying conditions, see the file gcc.texi.
+
++
+ @ifset INTERNALS
+ @node Machine Desc
+ @chapter Machine Descriptions
+@@ -1685,6 +1686,58 @@ A memory reference suitable for iWMMXt l
+ A memory reference suitable for the ARMv4 ldrsb instruction.
+ @end table
+
++@item AVR32 family---@file{avr32.h}
++@table @code
++@item f
++Floating-point registers (f0 to f15)
++
++@item Ku@var{bits}
++Unsigned constant representable with @var{bits} number of bits (Must be
++two digits). I.e: An unsigned 8-bit constant is written as @samp{Ku08}
++
++@item Ks@var{bits}
++Signed constant representable with @var{bits} number of bits (Must be
++two digits). I.e: A signed 12-bit constant is written as @samp{Ks12}
++
++@item Is@var{bits}
++The negated range of a signed constant representable with @var{bits}
++number of bits. The same as @samp{Ks@var{bits}} with a negated range.
++This means that the constant must be in the range @math{-2^{bits-1}-1} to @math{2^{bits-1}}
++
++@item G
++A single/double precision floating-point immediate or 64-bit integer
++immediate where the least and most significant words both can be
++loaded with a move instruction. That is, the integer form of the
++values in the least and most significant words both are in the range
++@math{-2^{20}} to @math{2^{20}-1}.
++
++@item RKs@var{bits}
++A memory reference where the address consists of a base register
++plus a signed immediate displacement with range given by @samp{Ks@var{bits}}
++which has the same format as for the signed immediate integer constraint
++given above.
++
++@item RKu@var{bits}
++A memory reference where the address consists of a base register
++plus an unsigned immediate displacement with range given by @samp{Ku@var{bits}}
++which has the same format as for the unsigned immediate integer constraint
++given above.
++
++@item S
++A memory reference with an immediate or register offset
++
++@item T
++A memory reference to a constant pool entry
++
++@item W
++A valid operand for use in the @samp{lda.w} instruction macro when
++relaxing is enabled
++
++@item Z
++A memory reference valid for coprocessor memory instructions
++
++@end table
++
+ @item AVR family---@file{config/avr/constraints.md}
+ @table @code
+ @item l
+--- a/gcc/expmed.c
++++ b/gcc/expmed.c
+@@ -472,9 +472,9 @@ store_bit_field_1 (rtx str_rtx, unsigned
+ ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
+ || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
+ && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
+- : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
++ : ( (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
+ || (offset * BITS_PER_UNIT % bitsize == 0
+- && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
++ && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))))
+ {
+ if (MEM_P (op0))
+ op0 = adjust_address (op0, fieldmode, offset);
+--- a/gcc/expr.c
++++ b/gcc/expr.c
+@@ -52,6 +52,7 @@ along with GCC; see the file COPYING3.
+ #include "tree-flow.h"
+ #include "target.h"
+ #include "timevar.h"
++#include "c-common.h"
+ #include "df.h"
+ #include "diagnostic.h"
+
+@@ -3647,16 +3648,17 @@ emit_single_push_insn (enum machine_mode
+ }
+ else
+ {
++ emit_move_insn (stack_pointer_rtx,
++ expand_binop (Pmode,
+ #ifdef STACK_GROWS_DOWNWARD
+- /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
+- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+- GEN_INT (-(HOST_WIDE_INT) rounded_size));
++ sub_optab,
+ #else
+- /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
+- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+- GEN_INT (rounded_size));
++ add_optab,
+ #endif
+- dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
++ stack_pointer_rtx,
++ GEN_INT (rounded_size),
++ NULL_RTX, 0, OPTAB_LIB_WIDEN));
++ dest_addr = stack_pointer_rtx;
+ }
+
+ dest = gen_rtx_MEM (mode, dest_addr);
+@@ -5775,7 +5777,8 @@ store_field (rtx target, HOST_WIDE_INT b
+ is a bit field, we cannot use addressing to access it.
+ Use bit-field techniques or SUBREG to store in it. */
+
+- if (mode == VOIDmode
++ if (
++ mode == VOIDmode
+ || (mode != BLKmode && ! direct_store[(int) mode]
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
+@@ -5932,7 +5935,18 @@ get_inner_reference (tree exp, HOST_WIDE
+ {
+ tree field = TREE_OPERAND (exp, 1);
+ size_tree = DECL_SIZE (field);
+- if (!DECL_BIT_FIELD (field))
++ if (!DECL_BIT_FIELD (field)
++ /* Added for AVR32:
++ Bitfields with a size equal to a target storage
++ type might not cause DECL_BIT_FIELD to return
++ true since it can be optimized into a normal array
++ access operation. But for volatile bitfields we do
++ not allow this when targetm.narrow_volatile_bitfield ()
++ is false. We can use DECL_C_BIT_FIELD to check if this
++ really is a c-bitfield. */
++ && !(TREE_THIS_VOLATILE (exp)
++ && !targetm.narrow_volatile_bitfield ()
++ && DECL_C_BIT_FIELD (field)) )
+ mode = DECL_MODE (field);
+ else if (DECL_MODE (field) == BLKmode)
+ blkmode_bitfield = true;
+@@ -7915,7 +7929,8 @@ expand_expr_real_1 (tree exp, rtx target
+ by doing the extract into an object as wide as the field
+ (which we know to be the width of a basic mode), then
+ storing into memory, and changing the mode to BLKmode. */
+- if (mode1 == VOIDmode
++ if (
++ mode1 == VOIDmode
+ || REG_P (op0) || GET_CODE (op0) == SUBREG
+ || (mode1 != BLKmode && ! direct_load[(int) mode1]
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+--- a/gcc/function.c
++++ b/gcc/function.c
+@@ -2810,7 +2810,11 @@ assign_parm_setup_reg (struct assign_par
+ assign_parm_remove_parallels (data);
+
+ /* Copy the value into the register. */
+- if (data->nominal_mode != data->passed_mode
++ if ( (data->nominal_mode != data->passed_mode
++ /* Added for AVR32: If passed_mode is equal
++	 to promoted nominal mode, why should we convert?
++ The conversion should make no difference. */
++ && data->passed_mode != promoted_nominal_mode)
+ || promoted_nominal_mode != data->promoted_mode)
+ {
+ int save_tree_used;
+--- a/gcc/genemit.c
++++ b/gcc/genemit.c
+@@ -121,6 +121,24 @@ max_operand_vec (rtx insn, int arg)
+ }
+
+ static void
++gen_vararg_prologue(int operands)
++{
++ int i;
++
++ if (operands > 1)
++ {
++ for (i = 1; i < operands; i++)
++ printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
++
++ printf(" va_list args;\n\n");
++ printf(" va_start(args, operand0);\n");
++ for (i = 1; i < operands; i++)
++ printf(" operand%d = va_arg(args, rtx);\n", i);
++ printf(" va_end(args);\n\n");
++ }
++}
++
++static void
+ print_code (RTX_CODE code)
+ {
+ const char *p1;
+@@ -406,18 +424,16 @@ gen_insn (rtx insn, int lineno)
+ fatal ("match_dup operand number has no match_operand");
+
+ /* Output the function name and argument declarations. */
+- printf ("rtx\ngen_%s (", XSTR (insn, 0));
++ printf ("rtx\ngen_%s ", XSTR (insn, 0));
++
+ if (operands)
+- for (i = 0; i < operands; i++)
+- if (i)
+- printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
++ printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
+ else
+- printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
+- else
+- printf ("void");
+- printf (")\n");
++ printf("(void)\n");
+ printf ("{\n");
+
++ gen_vararg_prologue(operands);
++
+ /* Output code to construct and return the rtl for the instruction body. */
+
+ if (XVECLEN (insn, 1) == 1)
+@@ -461,16 +477,12 @@ gen_expand (rtx expand)
+ operands = max_operand_vec (expand, 1);
+
+ /* Output the function name and argument declarations. */
+- printf ("rtx\ngen_%s (", XSTR (expand, 0));
++ printf ("rtx\ngen_%s ", XSTR (expand, 0));
+ if (operands)
+- for (i = 0; i < operands; i++)
+- if (i)
+- printf (",\n\trtx operand%d", i);
+- else
+- printf ("rtx operand%d", i);
++ printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
+ else
+- printf ("void");
+- printf (")\n");
++ printf("(void)\n");
++
+ printf ("{\n");
+
+ /* If we don't have any C code to write, only one insn is being written,
+@@ -480,6 +492,8 @@ gen_expand (rtx expand)
+ && operands > max_dup_opno
+ && XVECLEN (expand, 1) == 1)
+ {
++ gen_vararg_prologue(operands);
++
+ printf (" return ");
+ gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
+ printf (";\n}\n\n");
+@@ -493,6 +507,7 @@ gen_expand (rtx expand)
+ for (; i <= max_scratch_opno; i++)
+ printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
+ printf (" rtx _val = 0;\n");
++ gen_vararg_prologue(operands);
+ printf (" start_sequence ();\n");
+
+ /* The fourth operand of DEFINE_EXPAND is some code to be executed
+--- a/gcc/genflags.c
++++ b/gcc/genflags.c
+@@ -127,7 +127,6 @@ static void
+ gen_proto (rtx insn)
+ {
+ int num = num_operands (insn);
+- int i;
+ const char *name = XSTR (insn, 0);
+ int truth = maybe_eval_c_test (XSTR (insn, 2));
+
+@@ -158,12 +157,7 @@ gen_proto (rtx insn)
+ if (num == 0)
+ fputs ("void", stdout);
+ else
+- {
+- for (i = 1; i < num; i++)
+- fputs ("rtx, ", stdout);
+-
+- fputs ("rtx", stdout);
+- }
++ fputs("rtx, ...", stdout);
+
+ puts (");");
+
+@@ -173,12 +167,7 @@ gen_proto (rtx insn)
+ {
+ printf ("static inline rtx\ngen_%s", name);
+ if (num > 0)
+- {
+- putchar ('(');
+- for (i = 0; i < num-1; i++)
+- printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
+- printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
+- }
++ puts("(rtx ARG_UNUSED(a), ...)");
+ else
+ puts ("(void)");
+ puts ("{\n return 0;\n}");
+--- a/gcc/genoutput.c
++++ b/gcc/genoutput.c
+@@ -386,7 +386,7 @@ output_insn_data (void)
+ }
+
+ if (d->name && d->name[0] != '*')
+- printf (" (insn_gen_fn) gen_%s,\n", d->name);
++ printf (" gen_%s,\n", d->name);
+ else
+ printf (" 0,\n");
+
+--- a/gcc/ifcvt.c
++++ b/gcc/ifcvt.c
+@@ -84,7 +84,7 @@ static int num_possible_if_blocks;
+ static int num_updated_if_blocks;
+
+ /* # of changes made. */
+-static int num_true_changes;
++int num_true_changes;
+
+ /* Whether conditional execution changes were made. */
+ static int cond_exec_changed_p;
+@@ -290,6 +290,9 @@ cond_exec_process_insns (ce_if_block_t *
+ if (must_be_last)
+ return FALSE;
+
++#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
++ if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN )
++#endif
+ if (modified_in_p (test, insn))
+ {
+ if (!mod_ok)
+@@ -570,15 +573,18 @@ cond_exec_process_if_block (ce_if_block_
+ IFCVT_MODIFY_FINAL (ce_info);
+ #endif
+
++ /* Merge the blocks! */
++ if ( reload_completed ){
+ /* Conversion succeeded. */
+ if (dump_file)
+ fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
+ n_insns, (n_insns == 1) ? " was" : "s were");
+
+- /* Merge the blocks! */
+ merge_if_block (ce_info);
+ cond_exec_changed_p = TRUE;
+ return TRUE;
++ }
++ return FALSE;
+
+ fail:
+ #ifdef IFCVT_MODIFY_CANCEL
+@@ -1087,7 +1093,11 @@ noce_try_addcc (struct noce_if_info *if_
+ != UNKNOWN))
+ {
+ rtx cond = if_info->cond;
+- enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
++ /* This generates wrong code for AVR32. The cond code need not be reversed
++ since the addmodecc patterns add if the condition is NOT met. */
++ /* enum rtx_code code = reversed_comparison_code (cond, if_info->jump);*/
++ enum rtx_code code = GET_CODE(cond);
++
+
+ /* First try to use addcc pattern. */
+ if (general_operand (XEXP (cond, 0), VOIDmode)
+@@ -3039,7 +3049,12 @@ find_if_header (basic_block test_bb, int
+ && noce_find_if_block (test_bb, then_edge, else_edge, pass))
+ goto success;
+
+- if (HAVE_conditional_execution && reload_completed
++ if (HAVE_conditional_execution &&
++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
++ (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD)
++#else
++ reload_completed
++#endif
+ && cond_exec_find_if_block (&ce_info))
+ goto success;
+
+@@ -3154,7 +3169,11 @@ cond_exec_find_if_block (struct ce_if_bl
+
+ /* We only ever should get here after reload,
+ and only if we have conditional execution. */
++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
++ gcc_assert (HAVE_conditional_execution && (reload_completed||IFCVT_COND_EXEC_BEFORE_RELOAD));
++#else
+ gcc_assert (HAVE_conditional_execution && reload_completed);
++#endif
+
+ /* Discover if any fall through predecessors of the current test basic block
+ were && tests (which jump to the else block) or || tests (which jump to
+@@ -4259,6 +4278,14 @@ gate_handle_if_after_reload (void)
+ static unsigned int
+ rest_of_handle_if_after_reload (void)
+ {
++ /* Hack for the AVR32 experimental ifcvt processing before reload.
++ The AVR32 specific ifcvt code needs to know when ifcvt after reload
++ has begun. */
++#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
++ if ( IFCVT_COND_EXEC_BEFORE_RELOAD )
++ cfun->machine->ifcvt_after_reload = 1;
++#endif
++
+ if_convert ();
+ return 0;
+ }
+--- a/gcc/longlong.h
++++ b/gcc/longlong.h
+@@ -250,6 +250,41 @@ UDItype __umulsidi3 (USItype, USItype);
+ #define COUNT_LEADING_ZEROS_0 32
+ #endif
+
++#if defined (__avr32__) && W_TYPE_SIZE == 32
++#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
++ __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \
++ : "=r" ((USItype) (sh)), \
++ "=&r" ((USItype) (sl)) \
++ : "r" ((USItype) (ah)), \
++ "r" ((USItype) (bh)), \
++ "r" ((USItype) (al)), \
++ "r" ((USItype) (bl)) __CLOBBER_CC)
++#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
++ __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
++ : "=r" ((USItype) (sh)), \
++ "=&r" ((USItype) (sl)) \
++ : "r" ((USItype) (ah)), \
++ "r" ((USItype) (bh)), \
++ "r" ((USItype) (al)), \
++ "r" ((USItype) (bl)) __CLOBBER_CC)
++
++#if !defined (__AVR32_NO_MUL__)
++#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
++
++#define umul_ppmm(w1, w0, u, v) \
++{ \
++ DWunion __w; \
++ __w.ll = __umulsidi3 (u, v); \
++ w1 = __w.s.high; \
++ w0 = __w.s.low; \
++}
++#endif
++
++#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
++#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
++#define COUNT_LEADING_ZEROS_0 32
++#endif
++
+ #if defined (__CRIS__) && __CRIS_arch_version >= 3
+ #define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
+ #if __CRIS_arch_version >= 8
+--- a/gcc/optabs.h
++++ b/gcc/optabs.h
+@@ -603,7 +603,7 @@ extern enum insn_code reload_out_optab[N
+ extern optab code_to_optab[NUM_RTX_CODE + 1];
+
+
+-typedef rtx (*rtxfun) (rtx);
++typedef rtx (*rtxfun) (rtx, ...);
+
+ /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
+ gives the gen_function to make a branch to test that condition. */
+--- a/gcc/regrename.c
++++ b/gcc/regrename.c
+@@ -1582,6 +1582,9 @@ copyprop_hardreg_forward_1 (basic_block
+ bool changed = false;
+ rtx insn;
+
++ rtx prev_pred_test;
++ int prev_pred_insn_skipped = 0;
++
+ for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
+ {
+ int n_ops, i, alt, predicated;
+@@ -1621,6 +1624,58 @@ copyprop_hardreg_forward_1 (basic_block
+ recog_data.operand_type[i] = OP_INOUT;
+ }
+
++
++ /* Added for targets (AVR32) which supports test operands to be modified
++ in cond_exec instruction. For these targets we cannot make a change to
++     the test operands if one of the test operands is an output operand. This is because
++ changing the test operands might cause the need for inserting a new test
++ insns in the middle of a sequence of cond_exec insns and if the test operands
++ are modified these tests will fail.
++ */
++ if ( IFCVT_ALLOW_MODIFY_TEST_IN_INSN
++ && predicated )
++ {
++ int insn_skipped = 0;
++ rtx test = COND_EXEC_TEST (PATTERN (insn));
++
++ /* Check if the previous insn was a skipped predicated insn with the same
++ test as this predicated insns. If so we cannot do any modification to
++ this insn either since we cannot emit the test insn because the operands
++ are clobbered. */
++ if ( prev_pred_insn_skipped
++ && (rtx_equal_p (test, prev_pred_test)
++ || rtx_equal_p (test, reversed_condition (prev_pred_test))) )
++ {
++ insn_skipped = 1;
++ }
++ else
++ {
++ /* Check if the output operand is used in the test expression. */
++ for (i = 0; i < n_ops; ++i)
++ if ( recog_data.operand_type[i] == OP_INOUT
++ && reg_mentioned_p (recog_data.operand[i], test) )
++ {
++ insn_skipped = 1;
++ break;
++ }
++
++ }
++
++ prev_pred_test = test;
++ prev_pred_insn_skipped = insn_skipped;
++ if ( insn_skipped )
++ {
++ if (insn == BB_END (bb))
++ break;
++ else
++ continue;
++ }
++ }
++ else
++ {
++ prev_pred_insn_skipped = 0;
++ }
++
+ /* For each earlyclobber operand, zap the value data. */
+ for (i = 0; i < n_ops; i++)
+ if (recog_op_alt[i][alt].earlyclobber)
+--- a/gcc/sched-deps.c
++++ b/gcc/sched-deps.c
+@@ -1473,7 +1473,14 @@ fixup_sched_groups (rtx insn)
+
+ prev_nonnote = prev_nonnote_insn (insn);
+ if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
+- && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
++ /* Modification for AVR32 by RP: Why is this here, this will
++ cause instruction to be without any dependencies which might
++ cause it to be moved anywhere. For the AVR32 we try to keep
++ a group of conditionals together even if they are mutual exclusive.
++ */
++ && (! sched_insns_conditions_mutex_p (insn, prev_nonnote)
++ || GET_CODE (PATTERN (insn)) == COND_EXEC )
++ )
+ add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
+ }
+
+@@ -2230,8 +2237,29 @@ sched_analyze_insn (struct deps *deps, r
+
+ if (code == COND_EXEC)
+ {
++#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
++ if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN)
++ {
++          /* Check if we have a group of conditional instructions with the same test.
++ If so we must make sure that they are not scheduled apart in order to
++             avoid unnecessary tests and if one of the registers in the test is modified
++ in the instruction this is needed to ensure correct code. */
++ if ( prev_nonnote_insn (insn)
++ && INSN_P (prev_nonnote_insn (insn))
++ && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC
++ && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0))
++ && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1))
++ && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x))
++ || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn)))
++ {
++ SCHED_GROUP_P (insn) = 1;
++ //CANT_MOVE (prev_nonnote_insn (insn)) = 1;
++ }
++ }
++#endif
+ sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
+
++
+ /* ??? Should be recording conditions so we reduce the number of
+ false dependencies. */
+ x = COND_EXEC_CODE (x);
+--- a/gcc/testsuite/gcc.dg/sibcall-3.c
++++ b/gcc/testsuite/gcc.dg/sibcall-3.c
+@@ -5,7 +5,7 @@
+ Copyright (C) 2002 Free Software Foundation Inc.
+ Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
+
+-/* { dg-do run { xfail { { arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
++/* { dg-do run { xfail { { arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
+ /* -mlongcall disables sibcall patterns. */
+ /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
+ /* { dg-options "-O2 -foptimize-sibling-calls" } */
+--- a/gcc/testsuite/gcc.dg/sibcall-4.c
++++ b/gcc/testsuite/gcc.dg/sibcall-4.c
+@@ -5,7 +5,7 @@
+ Copyright (C) 2002 Free Software Foundation Inc.
+ Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
+
+-/* { dg-do run { xfail { { arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
++/* { dg-do run { xfail { { arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
+ /* -mlongcall disables sibcall patterns. */
+ /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
+ /* { dg-options "-O2 -foptimize-sibling-calls" } */
+--- a/gcc/testsuite/gcc.dg/trampoline-1.c
++++ b/gcc/testsuite/gcc.dg/trampoline-1.c
+@@ -47,6 +47,8 @@ void foo (void)
+
+ int main (void)
+ {
++#ifndef NO_TRAMPOLINES
+ foo ();
++#endif
+ return 0;
+ }
+--- a/libgcc/config.host
++++ b/libgcc/config.host
+@@ -218,6 +218,13 @@ arm*-wince-pe*)
+ ;;
+ arm-*-pe*)
+ ;;
++avr32-*-linux*)
++ # No need to build crtbeginT.o on uClibc systems. Should probably be
++ # moved to the OS specific section above.
++ extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
++ ;;
++avr32-*-*)
++ ;;
+ avr-*-rtems*)
+ ;;
+ avr-*-*)
+--- a/libstdc++-v3/config/os/gnu-linux/ctype_base.h
++++ b/libstdc++-v3/config/os/gnu-linux/ctype_base.h
+@@ -26,6 +26,8 @@
+ //
+ // ISO C++ 14882: 22.1 Locales
+ //
++#include <features.h>
++#include <ctype.h>
+
+ /** @file ctype_base.h
+ * This is an internal header file, included by other library headers.
+@@ -40,7 +42,11 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
+ struct ctype_base
+ {
+ // Non-standard typedefs.
++#ifdef __UCLIBC__
++ typedef const __ctype_touplow_t* __to_type;
++#else
+ typedef const int* __to_type;
++#endif
+
+ // NB: Offsets into ctype<char>::_M_table force a particular size
+ // on the mask type. Because of this, we don't use an enum.
+--- a/libstdc++-v3/include/Makefile.in
++++ b/libstdc++-v3/include/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
+ build_triplet = @build@
+ host_triplet = @host@
+ target_triplet = @target@
++LIBOBJDIR =
+ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(top_srcdir)/fragment.am
+ subdir = include
+--- a/libstdc++-v3/libsupc++/Makefile.in
++++ b/libstdc++-v3/libsupc++/Makefile.in
+@@ -38,6 +38,7 @@ POST_UNINSTALL = :
+ build_triplet = @build@
+ host_triplet = @host@
+ target_triplet = @target@
++LIBOBJDIR =
+ DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in $(top_srcdir)/fragment.am
+ subdir = libsupc++
+--- a/libstdc++-v3/Makefile.in
++++ b/libstdc++-v3/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
+ build_triplet = @build@
+ host_triplet = @host@
+ target_triplet = @target@
++LIBOBJDIR =
+ DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/../config.guess \
+ $(srcdir)/../config.sub README ChangeLog $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(top_srcdir)/configure \
+--- a/libstdc++-v3/po/Makefile.in
++++ b/libstdc++-v3/po/Makefile.in
+@@ -36,6 +36,7 @@ POST_UNINSTALL = :
+ build_triplet = @build@
+ host_triplet = @host@
+ target_triplet = @target@
++LIBOBJDIR =
+ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(top_srcdir)/fragment.am
+ subdir = po
+--- a/libstdc++-v3/src/Makefile.in
++++ b/libstdc++-v3/src/Makefile.in
+@@ -37,6 +37,7 @@ POST_UNINSTALL = :
+ build_triplet = @build@
+ host_triplet = @host@
+ target_triplet = @target@
++LIBOBJDIR =
+ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(top_srcdir)/fragment.am
+ subdir = src
diff --git a/toolchain/gcc/patches/4.4.7/931-avr32_disable_shifted_data_opt.patch b/toolchain/gcc/patches/4.4.7/931-avr32_disable_shifted_data_opt.patch
new file mode 100644
index 000000000..2003e97ae
--- /dev/null
+++ b/toolchain/gcc/patches/4.4.7/931-avr32_disable_shifted_data_opt.patch
@@ -0,0 +1,32 @@
+--- a/gcc/config/avr32/avr32.c
++++ b/gcc/config/avr32/avr32.c
+@@ -6726,7 +6726,28 @@ avr32_reorg_optimization (void)
+ }
+ }
+
+- if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
++ /* Disabled this optimization since it has a bug */
++ /* In the case where the data instruction the shifted insn gets folded
++ * into is a branch destination, this breaks, i.e.
++ *
++ * add r8, r10, r8 << 2
++ * 1:
++ * ld.w r11, r8[0]
++ * ...
++ * mov r8, sp
++ * rjmp 1b
++ *
++ * gets folded to:
++ *
++ * 1:
++ * ld.w r11, r10[r8 << 2]
++ * ...
++ * mov r8, sp
++ * rjmp 1b
++ *
++ * which is clearly wrong..
++ */
++ if (0 && TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
+ {
+
+ /* Scan through all insns looking for shifted add operations */
diff --git a/toolchain/gcc/patches/4.4.7/933-avr32_bug_7435.patch b/toolchain/gcc/patches/4.4.7/933-avr32_bug_7435.patch
new file mode 100644
index 000000000..78106619c
--- /dev/null
+++ b/toolchain/gcc/patches/4.4.7/933-avr32_bug_7435.patch
@@ -0,0 +1,32 @@
+--- a/gcc/config/avr32/avr32.c
++++ b/gcc/config/avr32/avr32.c
+@@ -243,14 +243,14 @@ void
+ avr32_override_options (void)
+ {
+ const struct part_type_s *part;
+- const struct arch_type_s *arch;
++ const struct arch_type_s *arch, *part_arch;
+
+ /*Add backward compability*/
+ if (strcmp ("uc", avr32_arch_name)== 0)
+ {
+ fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
+ "Please use '-march=ucr1' instead. "
+- "Converting to arch 'ucr1'\n",
++ "Using arch 'ucr1'\n",
+ avr32_arch_name);
+ avr32_arch_name="ucr1";
+ }
+@@ -298,6 +298,12 @@ avr32_override_options (void)
+ if (!arch->name)
+ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
+
++ /* When architecture implied by -mpart and one passed in -march are
++ * conflicting, issue an error message */
++ part_arch = &avr32_arch_types[avr32_part->arch_type];
++ if (strcmp("none",avr32_part_name) && strcmp("none", avr32_arch_name) && strcmp(avr32_arch_name,part_arch->name))
++ error ("Conflicting architectures implied by -mpart and -march\n");
++
+ /* If optimization level is two or greater, then align start of loops to a
+ word boundary since this will allow folding the first insn of the loop.
+ Do this only for targets supporting branch prediction. */
diff --git a/toolchain/gcc/patches/4.4.7/934-avr32_bug_9675.patch b/toolchain/gcc/patches/4.4.7/934-avr32_bug_9675.patch
new file mode 100644
index 000000000..3690e2d04
--- /dev/null
+++ b/toolchain/gcc/patches/4.4.7/934-avr32_bug_9675.patch
@@ -0,0 +1,21 @@
+--- a/gcc/config/avr32/lib1funcs.S
++++ b/gcc/config/avr32/lib1funcs.S
+@@ -1460,7 +1460,6 @@ __avr32_f64_cmp_lt:
+ 0:
+ ld.w r7, sp++
+ popm pc, r12=0
+-#endif
+
+ 3:
+ cp.w r7, 1 /* Check sign bit from r9 */
+@@ -1481,8 +1480,8 @@ __avr32_f64_cmp_lt:
+ reteq 0 /* Both operands are zero. Return false. */
+ #endif
+ ret r12
+-
+-
++#endif
++
+ #if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
+ .align 2
+
diff --git a/toolchain/gcc/patches/4.4.7/cflags.patch b/toolchain/gcc/patches/4.4.7/cflags.patch
new file mode 100644
index 000000000..7952d71fe
--- /dev/null
+++ b/toolchain/gcc/patches/4.4.7/cflags.patch
@@ -0,0 +1,257 @@
+diff -Nur gcc-4.4.7.orig/gcc/common.opt gcc-4.4.7/gcc/common.opt
+--- gcc-4.4.7.orig/gcc/common.opt 2009-03-28 18:28:45.000000000 +0100
++++ gcc-4.4.7/gcc/common.opt 2014-09-09 19:50:59.000000000 +0200
+@@ -102,6 +102,10 @@
+ Common Joined
+ Treat specified warning as error
+
++Werror-maybe-reset
++Common
++If environment variable GCC_NO_WERROR is set, act as -Wno-error
++
+ Wextra
+ Common Warning
+ Print extra (possibly unwanted) warnings
+@@ -573,6 +577,9 @@
+ Common Report Var(flag_guess_branch_prob) Optimization
+ Enable guessing of branch probabilities
+
++fhonour-copts
++Common RejectNegative
++
+ ; Nonzero means ignore `#ident' directives. 0 means handle them.
+ ; Generate position-independent code for executables if possible
+ ; On SVR4 targets, it also controls whether or not to emit a
+diff -Nur gcc-4.4.7.orig/gcc/c.opt gcc-4.4.7/gcc/c.opt
+--- gcc-4.4.7.orig/gcc/c.opt 2009-09-18 23:53:23.000000000 +0200
++++ gcc-4.4.7/gcc/c.opt 2014-09-09 19:50:59.000000000 +0200
+@@ -215,6 +215,10 @@
+ C ObjC RejectNegative Warning
+ This switch is deprecated; use -Werror=implicit-function-declaration instead
+
++Werror-maybe-reset
++C ObjC C++ ObjC++
++; Documented in common.opt
++
+ Wfloat-equal
+ C ObjC C++ ObjC++ Var(warn_float_equal) Warning
+ Warn if testing floating point numbers for equality
+@@ -613,6 +617,9 @@
+ fhonor-std
+ C++ ObjC++
+
++fhonour-copts
++C ObjC C++ ObjC++ RejectNegative
++
+ fhosted
+ C ObjC
+ Assume normal C execution environment
+diff -Nur gcc-4.4.7.orig/gcc/c-opts.c gcc-4.4.7/gcc/c-opts.c
+--- gcc-4.4.7.orig/gcc/c-opts.c 2009-02-18 03:16:03.000000000 +0100
++++ gcc-4.4.7/gcc/c-opts.c 2014-09-09 19:50:59.000000000 +0200
+@@ -105,6 +105,9 @@
+ /* Number of deferred options scanned for -include. */
+ static size_t include_cursor;
+
++/* Check if a port honours COPTS. */
++static int honour_copts = 0;
++
+ static void set_Wimplicit (int);
+ static void handle_OPT_d (const char *);
+ static void set_std_cxx98 (int);
+@@ -454,6 +457,14 @@
+ enable_warning_as_error ("implicit-function-declaration", value, CL_C | CL_ObjC);
+ break;
+
++ case OPT_Werror_maybe_reset:
++ {
++ char *ev = getenv ("GCC_NO_WERROR");
++ if ((ev != NULL) && (*ev != '0'))
++ cpp_opts->warnings_are_errors = 0;
++ }
++ break;
++
+ case OPT_Wformat:
+ set_Wformat (value);
+ break;
+@@ -690,6 +701,12 @@
+ flag_exceptions = value;
+ break;
+
++ case OPT_fhonour_copts:
++ if (c_language == clk_c) {
++ honour_copts++;
++ }
++ break;
++
+ case OPT_fimplement_inlines:
+ flag_implement_inlines = value;
+ break;
+@@ -1209,6 +1226,47 @@
+ return false;
+ }
+
++ if (c_language == clk_c) {
++ char *ev = getenv ("GCC_HONOUR_COPTS");
++ int evv;
++ if (ev == NULL)
++ evv = -1;
++ else if ((*ev == '0') || (*ev == '\0'))
++ evv = 0;
++ else if (*ev == '1')
++ evv = 1;
++ else if (*ev == '2')
++ evv = 2;
++ else if (*ev == 's')
++ evv = -1;
++ else {
++ warning (0, "unknown GCC_HONOUR_COPTS value, assuming 1");
++ evv = 1; /* maybe depend this on something like MIRBSD_NATIVE? */
++ }
++ if (evv == 1) {
++ if (honour_copts == 0) {
++ error ("someone does not honour COPTS at all in lenient mode");
++ return false;
++ } else if (honour_copts != 1) {
++ warning (0, "someone does not honour COPTS correctly, passed %d times",
++ honour_copts);
++ }
++ } else if (evv == 2) {
++ if (honour_copts == 0) {
++ error ("someone does not honour COPTS at all in strict mode");
++ return false;
++ } else if (honour_copts != 1) {
++ error ("someone does not honour COPTS correctly, passed %d times",
++ honour_copts);
++ return false;
++ }
++ } else if (evv == 0) {
++ if (honour_copts != 1)
++ inform (0, "someone does not honour COPTS correctly, passed %d times",
++ honour_copts);
++ }
++ }
++
+ return true;
+ }
+
+diff -Nur gcc-4.4.7.orig/gcc/doc/cppopts.texi gcc-4.4.7/gcc/doc/cppopts.texi
+--- gcc-4.4.7.orig/gcc/doc/cppopts.texi 2008-06-15 11:42:13.000000000 +0200
++++ gcc-4.4.7/gcc/doc/cppopts.texi 2014-09-09 19:50:59.000000000 +0200
+@@ -164,6 +164,11 @@
+ Make all warnings into hard errors. Source code which triggers warnings
+ will be rejected.
+
++ at item -Werror-maybe-reset
++ at opindex Werror-maybe-reset
++Act like @samp{-Wno-error} if the @env{GCC_NO_WERROR} environment
++variable is set to anything other than 0 or empty.
++
+ @item -Wsystem-headers
+ @opindex Wsystem-headers
+ Issue warnings for code in system headers. These are normally unhelpful
+diff -Nur gcc-4.4.7.orig/gcc/doc/invoke.texi gcc-4.4.7/gcc/doc/invoke.texi
+--- gcc-4.4.7.orig/gcc/doc/invoke.texi 2011-03-23 23:02:12.000000000 +0100
++++ gcc-4.4.7/gcc/doc/invoke.texi 2014-09-09 19:50:59.000000000 +0200
+@@ -234,7 +234,7 @@
+ -Wconversion -Wcoverage-mismatch -Wno-deprecated @gol
+ -Wno-deprecated-declarations -Wdisabled-optimization @gol
+ -Wno-div-by-zero -Wempty-body -Wenum-compare -Wno-endif-labels @gol
+--Werror -Werror=* @gol
++-Werror -Werror=* -Werror-maybe-reset @gol
+ -Wfatal-errors -Wfloat-equal -Wformat -Wformat=2 @gol
+ -Wno-format-contains-nul -Wno-format-extra-args -Wformat-nonliteral @gol
+ -Wformat-security -Wformat-y2k @gol
+@@ -4182,6 +4182,22 @@
+ @option{-Wall} and by @option{-pedantic}, which can be disabled with
+ @option{-Wno-pointer-sign}.
+
++ at item -Werror-maybe-reset
++ at opindex Werror-maybe-reset
++Act like @samp{-Wno-error} if the @env{GCC_NO_WERROR} environment
++variable is set to anything other than 0 or empty.
++
++ at item -fhonour-copts
++ at opindex fhonour-copts
++If @env{GCC_HONOUR_COPTS} is set to 1, abort if this option is not
++given at least once, and warn if it is given more than once.
++If @env{GCC_HONOUR_COPTS} is set to 2, abort if this option is not
++given exactly once.
++If @env{GCC_HONOUR_COPTS} is set to 0 or unset, warn if this option
++is not given exactly once.
++The warning is quelled if @env{GCC_HONOUR_COPTS} is set to @samp{s}.
++This flag and environment variable only affect the C language.
++
+ @item -Wstack-protector
+ @opindex Wstack-protector
+ @opindex Wno-stack-protector
+@@ -5721,7 +5737,7 @@
+ second branch or a point immediately following it, depending on whether
+ the condition is known to be true or false.
+
+-Enabled at levels @option{-O2}, @option{-O3}, @option{-Os}.
++Enabled at levels @option{-O3}.
+
+ @item -fsplit-wide-types
+ @opindex fsplit-wide-types
+@@ -5866,7 +5882,7 @@
+ @option{-fno-delete-null-pointer-checks} to disable this optimization
+ for programs which depend on that behavior.
+
+-Enabled at levels @option{-O2}, @option{-O3}, @option{-Os}.
++Enabled at levels @option{-O3}.
+
+ @item -fexpensive-optimizations
+ @opindex fexpensive-optimizations
+diff -Nur gcc-4.4.7.orig/gcc/java/jvspec.c gcc-4.4.7/gcc/java/jvspec.c
+--- gcc-4.4.7.orig/gcc/java/jvspec.c 2007-07-31 18:19:49.000000000 +0200
++++ gcc-4.4.7/gcc/java/jvspec.c 2014-09-09 19:50:59.000000000 +0200
+@@ -670,6 +670,7 @@
+ class name. Append dummy `.c' that can be stripped by set_input so %b
+ is correct. */
+ set_input (concat (main_class_name, "main.c", NULL));
++ putenv ("GCC_HONOUR_COPTS=s"); /* XXX hack! */
+ err = do_spec (jvgenmain_spec);
+ if (err == 0)
+ {
+diff -Nur gcc-4.4.7.orig/gcc/opts.c gcc-4.4.7/gcc/opts.c
+--- gcc-4.4.7.orig/gcc/opts.c 2009-11-27 12:34:32.000000000 +0100
++++ gcc-4.4.7/gcc/opts.c 2014-09-09 19:50:59.000000000 +0200
+@@ -898,9 +898,6 @@
+ flag_schedule_insns_after_reload = opt2;
+ #endif
+ flag_regmove = opt2;
+- flag_strict_aliasing = opt2;
+- flag_strict_overflow = opt2;
+- flag_delete_null_pointer_checks = opt2;
+ flag_reorder_blocks = opt2;
+ flag_reorder_functions = opt2;
+ flag_tree_vrp = opt2;
+@@ -924,6 +921,9 @@
+
+ /* -O3 optimizations. */
+ opt3 = (optimize >= 3);
++ flag_strict_aliasing = opt3;
++ flag_strict_overflow = opt3;
++ flag_delete_null_pointer_checks = opt3;
+ flag_predictive_commoning = opt3;
+ flag_inline_functions = opt3;
+ flag_unswitch_loops = opt3;
+@@ -1603,6 +1603,17 @@
+ enable_warning_as_error (arg, value, lang_mask);
+ break;
+
++ case OPT_Werror_maybe_reset:
++ {
++ char *ev = getenv ("GCC_NO_WERROR");
++ if ((ev != NULL) && (*ev != '0'))
++ warnings_are_errors = 0;
++ }
++ break;
++
++ case OPT_fhonour_copts:
++ break;
++
+ case OPT_Wextra:
+ set_Wextra (value);
+ break;