summaryrefslogtreecommitdiffstats
path: root/c/src/lib/libcpu/m68k/mcf532x/cache/cachepd.c
blob: 5a93ea9d6c7bd0b96e9f2e820dd3769190174378 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
/**
 *  @file
 *
 *  Cache Management Support Routines for the MCF532x
 */

#include <rtems.h>
#include <mcf532x/mcf532x.h>
#include "cache_.h"

/*
 * Write a value into the ColdFire cache control register (CACR) via MOVEC.
 * NOTE(review): the shadow-copy pattern below suggests the CACR cannot be
 * read back by software — confirm against the MCF532x reference manual.
 */
#define m68k_set_cacr(_cacr) \
  __asm__ volatile ("movec %0,%%cacr" : : "d" (_cacr))

/*
 * Read/write copy of common cache
 *  Default cache mode is *disabled* (cache only ACRx areas)
 *  Allow CPUSHL to invalidate a cache line
 *  Enable store buffer
 *
 * Every CACR update goes through this variable so the current register
 * contents are always known; it is modified only with interrupts disabled.
 */
static uint32_t cacr_mode = MCF_CACR_ESB |
                              MCF_CACR_DCM(3);

/*
 * Cannot be frozen
 *
 * The cache-freeze operations are part of the RTEMS cache manager
 * interface; this cache has no freeze capability, so all four entry
 * points are deliberate no-ops.
 */
void _CPU_cache_freeze_data(void)
{
}

void _CPU_cache_unfreeze_data(void)
{
}

void _CPU_cache_freeze_instruction(void)
{
}

void _CPU_cache_unfreeze_instruction(void)
{
}

/*
 * Push (flush) the cache line holding @a d_addr out of both caches.
 *
 * CPUSHL addresses the cache by set/way rather than by physical address:
 * bits [11:4] carry the set index (256 sets, see the mask below) and the
 * low bits select the way — presumably one of four, matching the four
 * probes issued here; confirm against the MCF532x reference manual.
 * Since only the set index can be derived from the address, the line is
 * pushed in every way of that set.
 */
void _CPU_cache_flush_1_data_line(const void *d_addr)
{
  unsigned long set_addr = ((unsigned long) d_addr) & 0xff0;
  unsigned long way;

  for (way = 0; way < 4; ++way) {
    __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (set_addr + way));
  }
}

/*
 * Push (flush) the entire cache by walking every set and every way with
 * CPUSHL.  The probe address encodes the set index in bits [11:4]
 * (256 sets) and the way index in the low bits (4 probes per set, as in
 * _CPU_cache_flush_1_data_line).
 */
void _CPU_cache_flush_entire_data(void)
{
  unsigned long set;
  unsigned long way;

  for (set = 0; set < 256; ++set) {
    for (way = 0; way < 4; ++way) {
      __asm__ volatile ("cpushl %%bc,(%0)" :: "a" ((set << 4) + way));
    }
  }
}

/*
 * Enable the (unified) cache by setting CENB in the CACR.
 *
 * cacr_mode is shared state that must always mirror the last value
 * written to the CACR, so the read-modify-write sequence runs with
 * interrupts disabled.  The register write is skipped when the enable
 * bit is already set.
 */
void _CPU_cache_enable_instruction(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);
  if ((cacr_mode & MCF_CACR_CENB) == 0) {
    cacr_mode |= MCF_CACR_CENB;
    m68k_set_cacr(cacr_mode);
  }
  rtems_interrupt_enable(level);
}

/*
 * Disable the (unified) cache by clearing CENB in the CACR.
 *
 * As in _CPU_cache_enable_instruction(), the shadow copy and the
 * hardware register are updated together under interrupt protection,
 * and the write is skipped when the cache is already disabled.
 */
void _CPU_cache_disable_instruction(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);
  if ((cacr_mode & MCF_CACR_CENB) != 0) {
    cacr_mode &= ~MCF_CACR_CENB;
    m68k_set_cacr(cacr_mode);
  }
  rtems_interrupt_enable(level);
}

/*
 * Invalidate the entire cache with a single CACR write.
 *
 * CINVA is OR-ed into a temporary so the shadow cacr_mode itself stays
 * unchanged — NOTE(review): this relies on CINVA being a self-clearing
 * trigger bit; confirm against the MCF532x reference manual.
 */
void _CPU_cache_invalidate_entire_instruction(void)
{
  uint32_t cacr_with_invalidate = cacr_mode | MCF_CACR_CINVA;

  m68k_set_cacr(cacr_with_invalidate);
}

/*
 * Push and invalidate the cache line holding @a addr.
 *
 * Identical in mechanism to _CPU_cache_flush_1_data_line(): the set
 * index (address bits [11:4]) is derived from the address and CPUSHL is
 * issued for each of the four ways of that set, since the way holding
 * the line cannot be determined from the address alone.
 */
void _CPU_cache_invalidate_1_instruction_line(const void *addr)
{
  unsigned long set_addr = ((unsigned long) addr) & 0xff0;
  unsigned long way;

  for (way = 0; way < 4; ++way) {
    __asm__ volatile ("cpushl %%bc,(%0)" :: "a" (set_addr + way));
  }
}

void _CPU_cache_enable_data(void)
{
  /*
   * The 532x has a unified data and instruction cache, so we call through
   * to enable instruction.
   */
  _CPU_cache_enable_instruction();
}

void _CPU_cache_disable_data(void)
{
  /*
   * The 532x has a unified data and instruction cache, so we call through
   * to disable instruction.
   */
  _CPU_cache_disable_instruction();
}

void _CPU_cache_invalidate_entire_data(void)
{
  /*
   * Unified cache: delegate to the instruction-cache variant.
   */
  _CPU_cache_invalidate_entire_instruction();
}

void _CPU_cache_invalidate_1_data_line(const void *addr)
{
  /*
   * Unified cache: delegate to the instruction-cache variant.
   */
  _CPU_cache_invalidate_1_instruction_line(addr);
}