From f715433ae9ee51d496d5c4a0e9b5a7f74bc14268 Mon Sep 17 00:00:00 2001
From: Sebastian Huber
Date: Tue, 4 Feb 2014 16:44:50 +0100
Subject: bsps/sparc: Order load/store by increasing offsets

This may increase the cache hit performance.
---
 c/src/lib/libbsp/sparc/shared/irq_asm.S | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/c/src/lib/libbsp/sparc/shared/irq_asm.S b/c/src/lib/libbsp/sparc/shared/irq_asm.S
index 773a8ea4b1..de7ef90c95 100644
--- a/c/src/lib/libbsp/sparc/shared/irq_asm.S
+++ b/c/src/lib/libbsp/sparc/shared/irq_asm.S
@@ -422,15 +422,15 @@ save_isf:
 
         GET_SELF_CPU_CONTROL %l5, %l7
 
-        ld       [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6
-        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
-
-        add      %l6, 1, %l6
-        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
+        ld       [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6
 
         add      %l7, 1, %l7
         st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
 
+        add      %l6, 1, %l6
+        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+
         /*
          *  If ISR nest level was zero (now 1), then switch stack.
          */
@@ -581,11 +581,11 @@ dont_fix_pil2:
          *    l7 = _ISR_Nest_level value
          */
 
+        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
+
         sub      %l6, 1, %l6
         st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
 
-        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
-
         /*
          *  If dispatching is disabled (includes nested interrupt case),
          *  then do a "simple" exit.
--
cgit v1.2.3
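
Illustration (not part of the commit): the change reads and updates the two per-CPU counters in the order of increasing structure offsets, so the memory accesses walk the per-CPU control area sequentially. A minimal C sketch of that access pattern follows; the struct layout and field names are hypothetical stand-ins, not the actual RTEMS Per_CPU_Control definition.

#include <stdint.h>

/* Hypothetical per-CPU layout: the field at the lower offset is touched first. */
struct per_cpu_sketch {
  uint32_t isr_nest_level;                /* assumed lower offset  */
  uint32_t thread_dispatch_disable_level; /* assumed higher offset */
};

/*
 * Interrupt prologue, mirroring the reordered ld/st sequence in the patch:
 * load both counters in increasing-offset order, then store the incremented
 * values back in the same order.
 */
void interrupt_prologue(struct per_cpu_sketch *cpu)
{
  uint32_t nest = cpu->isr_nest_level;
  uint32_t disable = cpu->thread_dispatch_disable_level;

  cpu->isr_nest_level = nest + 1;
  cpu->thread_dispatch_disable_level = disable + 1;
}

Since the two 32-bit counters sit next to each other, accessing them in ascending offset order typically keeps the accesses within the same cache line and in a pattern that simple prefetchers handle well, which is the "may increase the cache hit performance" rationale given in the commit message.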