path: root/toolchain/gcc/patches/4.4.7/931-avr32_disable_shifted_data_opt.patch
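
Disable the shifted-add folding in avr32_reorg_optimization: when the
memory instruction that the shifted add is folded into sits at a branch
destination, the folded form recomputes the address from a register
that the loop body may already have overwritten.

As illustration, here is a rough C model of the two instruction
sequences shown in the comment below. The helpers load(), next_r8()
and keep_looping() are hypothetical stand-ins for the ld.w access, the
"mov r8, sp" and the loop exit test; only the address computation
matters.

    #include <stdint.h>

    uint32_t load(uint32_t addr);  /* stands in for ld.w          */
    uint32_t next_r8(void);        /* stands in for mov r8, sp    */
    int keep_looping(void);        /* stands in for the exit test */

    /* Before folding: the add runs once, before the branch target,
       so iterations after the first load from the freshly written
       r8 value. */
    uint32_t before_fold(uint32_t r10, uint32_t r8)
    {
        uint32_t r11;
        r8 = r10 + (r8 << 2);      /*    add  r8, r10, r8 << 2 */
        do {                       /* 1:                       */
            r11 = load(r8);        /*    ld.w r11, r8[0]       */
            r8 = next_r8();        /*    mov  r8, sp           */
        } while (keep_looping());  /*    rjmp 1b               */
        return r11;
    }

    /* After folding: the address is rescaled from the current r8
       on every iteration, so iterations after the first load from
       r10 + (r8 << 2) instead of r8 -- a different address. */
    uint32_t after_fold(uint32_t r10, uint32_t r8)
    {
        uint32_t r11;
        do {                                /* 1:                        */
            r11 = load(r10 + (r8 << 2));    /*    ld.w r11, r10[r8 << 2] */
            r8 = next_r8();                 /*    mov  r8, sp            */
        } while (keep_looping());           /*    rjmp 1b                */
        return r11;
    }

The optimization is gated off with "if (0 && ...)" rather than deleted,
which keeps the code compiling and makes it easy to re-enable once the
branch-destination case is handled.
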
--- a/gcc/config/avr32/avr32.c
+++ b/gcc/config/avr32/avr32.c
@@ -6726,7 +6726,28 @@ avr32_reorg_optimization (void)
 	}
     }
 
-  if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
+  /* Disable this optimization, since it has a bug. */
+  /* When the data instruction that the shifted insn gets folded
+   * into is a branch destination, the fold is incorrect, e.g.
+   *
+   *    add r8, r10, r8 << 2
+   * 1:
+   *    ld.w r11, r8[0]
+   *    ...
+   *    mov r8, sp
+   *    rjmp 1b
+   *
+   * gets folded to:
+   *
+   * 1:
+   *    ld.w r11, r10[r8 << 2]
+   *    ...
+   *    mov r8, sp
+   *    rjmp 1b
+   *
+   * which is clearly wrong.
+   */
+  if (0 && TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
     {
 
       /* Scan through all insns looking for shifted add operations */